github.com/riscv/riscv-go@v0.0.0-20200123204226-124ebd6fcc8e/src/cmd/compile/internal/ssa/rewriteAMD64.go

// autogenerated from gen/AMD64.rules: do not edit!
// generated with: cd gen; go run *.go

package ssa

import "math"

var _ = math.MinInt8 // in case not otherwise used
func rewriteValueAMD64(v *Value, config *Config) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v, config)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v, config)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v, config)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v, config)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v, config)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v, config)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v, config)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v, config)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v, config)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v, config)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v, config)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v, config)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
	case OpAMD64MOVLloadidx4:
		return
rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config) 91 case OpAMD64MOVLstore: 92 return rewriteValueAMD64_OpAMD64MOVLstore(v, config) 93 case OpAMD64MOVLstoreconst: 94 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config) 95 case OpAMD64MOVLstoreconstidx1: 96 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config) 97 case OpAMD64MOVLstoreconstidx4: 98 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config) 99 case OpAMD64MOVLstoreidx1: 100 return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config) 101 case OpAMD64MOVLstoreidx4: 102 return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config) 103 case OpAMD64MOVOload: 104 return rewriteValueAMD64_OpAMD64MOVOload(v, config) 105 case OpAMD64MOVOstore: 106 return rewriteValueAMD64_OpAMD64MOVOstore(v, config) 107 case OpAMD64MOVQatomicload: 108 return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config) 109 case OpAMD64MOVQload: 110 return rewriteValueAMD64_OpAMD64MOVQload(v, config) 111 case OpAMD64MOVQloadidx1: 112 return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config) 113 case OpAMD64MOVQloadidx8: 114 return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config) 115 case OpAMD64MOVQstore: 116 return rewriteValueAMD64_OpAMD64MOVQstore(v, config) 117 case OpAMD64MOVQstoreconst: 118 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config) 119 case OpAMD64MOVQstoreconstidx1: 120 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config) 121 case OpAMD64MOVQstoreconstidx8: 122 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config) 123 case OpAMD64MOVQstoreidx1: 124 return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config) 125 case OpAMD64MOVQstoreidx8: 126 return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config) 127 case OpAMD64MOVSDload: 128 return rewriteValueAMD64_OpAMD64MOVSDload(v, config) 129 case OpAMD64MOVSDloadidx1: 130 return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config) 131 case OpAMD64MOVSDloadidx8: 132 return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config) 133 case OpAMD64MOVSDstore: 134 return rewriteValueAMD64_OpAMD64MOVSDstore(v, config) 135 case OpAMD64MOVSDstoreidx1: 136 return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config) 137 case OpAMD64MOVSDstoreidx8: 138 return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config) 139 case OpAMD64MOVSSload: 140 return rewriteValueAMD64_OpAMD64MOVSSload(v, config) 141 case OpAMD64MOVSSloadidx1: 142 return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config) 143 case OpAMD64MOVSSloadidx4: 144 return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config) 145 case OpAMD64MOVSSstore: 146 return rewriteValueAMD64_OpAMD64MOVSSstore(v, config) 147 case OpAMD64MOVSSstoreidx1: 148 return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config) 149 case OpAMD64MOVSSstoreidx4: 150 return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config) 151 case OpAMD64MOVWQSX: 152 return rewriteValueAMD64_OpAMD64MOVWQSX(v, config) 153 case OpAMD64MOVWQSXload: 154 return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config) 155 case OpAMD64MOVWQZX: 156 return rewriteValueAMD64_OpAMD64MOVWQZX(v, config) 157 case OpAMD64MOVWload: 158 return rewriteValueAMD64_OpAMD64MOVWload(v, config) 159 case OpAMD64MOVWloadidx1: 160 return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config) 161 case OpAMD64MOVWloadidx2: 162 return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config) 163 case OpAMD64MOVWstore: 164 return rewriteValueAMD64_OpAMD64MOVWstore(v, config) 165 case OpAMD64MOVWstoreconst: 166 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config) 167 case OpAMD64MOVWstoreconstidx1: 168 return 
rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config) 169 case OpAMD64MOVWstoreconstidx2: 170 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config) 171 case OpAMD64MOVWstoreidx1: 172 return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config) 173 case OpAMD64MOVWstoreidx2: 174 return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config) 175 case OpAMD64MULL: 176 return rewriteValueAMD64_OpAMD64MULL(v, config) 177 case OpAMD64MULLconst: 178 return rewriteValueAMD64_OpAMD64MULLconst(v, config) 179 case OpAMD64MULQ: 180 return rewriteValueAMD64_OpAMD64MULQ(v, config) 181 case OpAMD64MULQconst: 182 return rewriteValueAMD64_OpAMD64MULQconst(v, config) 183 case OpAMD64NEGL: 184 return rewriteValueAMD64_OpAMD64NEGL(v, config) 185 case OpAMD64NEGQ: 186 return rewriteValueAMD64_OpAMD64NEGQ(v, config) 187 case OpAMD64NOTL: 188 return rewriteValueAMD64_OpAMD64NOTL(v, config) 189 case OpAMD64NOTQ: 190 return rewriteValueAMD64_OpAMD64NOTQ(v, config) 191 case OpAMD64ORL: 192 return rewriteValueAMD64_OpAMD64ORL(v, config) 193 case OpAMD64ORLconst: 194 return rewriteValueAMD64_OpAMD64ORLconst(v, config) 195 case OpAMD64ORQ: 196 return rewriteValueAMD64_OpAMD64ORQ(v, config) 197 case OpAMD64ORQconst: 198 return rewriteValueAMD64_OpAMD64ORQconst(v, config) 199 case OpAMD64ROLBconst: 200 return rewriteValueAMD64_OpAMD64ROLBconst(v, config) 201 case OpAMD64ROLLconst: 202 return rewriteValueAMD64_OpAMD64ROLLconst(v, config) 203 case OpAMD64ROLQconst: 204 return rewriteValueAMD64_OpAMD64ROLQconst(v, config) 205 case OpAMD64ROLWconst: 206 return rewriteValueAMD64_OpAMD64ROLWconst(v, config) 207 case OpAMD64SARB: 208 return rewriteValueAMD64_OpAMD64SARB(v, config) 209 case OpAMD64SARBconst: 210 return rewriteValueAMD64_OpAMD64SARBconst(v, config) 211 case OpAMD64SARL: 212 return rewriteValueAMD64_OpAMD64SARL(v, config) 213 case OpAMD64SARLconst: 214 return rewriteValueAMD64_OpAMD64SARLconst(v, config) 215 case OpAMD64SARQ: 216 return rewriteValueAMD64_OpAMD64SARQ(v, config) 217 case OpAMD64SARQconst: 218 return rewriteValueAMD64_OpAMD64SARQconst(v, config) 219 case OpAMD64SARW: 220 return rewriteValueAMD64_OpAMD64SARW(v, config) 221 case OpAMD64SARWconst: 222 return rewriteValueAMD64_OpAMD64SARWconst(v, config) 223 case OpAMD64SBBLcarrymask: 224 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config) 225 case OpAMD64SBBQcarrymask: 226 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config) 227 case OpAMD64SETA: 228 return rewriteValueAMD64_OpAMD64SETA(v, config) 229 case OpAMD64SETAE: 230 return rewriteValueAMD64_OpAMD64SETAE(v, config) 231 case OpAMD64SETB: 232 return rewriteValueAMD64_OpAMD64SETB(v, config) 233 case OpAMD64SETBE: 234 return rewriteValueAMD64_OpAMD64SETBE(v, config) 235 case OpAMD64SETEQ: 236 return rewriteValueAMD64_OpAMD64SETEQ(v, config) 237 case OpAMD64SETG: 238 return rewriteValueAMD64_OpAMD64SETG(v, config) 239 case OpAMD64SETGE: 240 return rewriteValueAMD64_OpAMD64SETGE(v, config) 241 case OpAMD64SETL: 242 return rewriteValueAMD64_OpAMD64SETL(v, config) 243 case OpAMD64SETLE: 244 return rewriteValueAMD64_OpAMD64SETLE(v, config) 245 case OpAMD64SETNE: 246 return rewriteValueAMD64_OpAMD64SETNE(v, config) 247 case OpAMD64SHLL: 248 return rewriteValueAMD64_OpAMD64SHLL(v, config) 249 case OpAMD64SHLLconst: 250 return rewriteValueAMD64_OpAMD64SHLLconst(v, config) 251 case OpAMD64SHLQ: 252 return rewriteValueAMD64_OpAMD64SHLQ(v, config) 253 case OpAMD64SHLQconst: 254 return rewriteValueAMD64_OpAMD64SHLQconst(v, config) 255 case OpAMD64SHRB: 256 return rewriteValueAMD64_OpAMD64SHRB(v, config) 
257 case OpAMD64SHRBconst: 258 return rewriteValueAMD64_OpAMD64SHRBconst(v, config) 259 case OpAMD64SHRL: 260 return rewriteValueAMD64_OpAMD64SHRL(v, config) 261 case OpAMD64SHRLconst: 262 return rewriteValueAMD64_OpAMD64SHRLconst(v, config) 263 case OpAMD64SHRQ: 264 return rewriteValueAMD64_OpAMD64SHRQ(v, config) 265 case OpAMD64SHRQconst: 266 return rewriteValueAMD64_OpAMD64SHRQconst(v, config) 267 case OpAMD64SHRW: 268 return rewriteValueAMD64_OpAMD64SHRW(v, config) 269 case OpAMD64SHRWconst: 270 return rewriteValueAMD64_OpAMD64SHRWconst(v, config) 271 case OpAMD64SUBL: 272 return rewriteValueAMD64_OpAMD64SUBL(v, config) 273 case OpAMD64SUBLconst: 274 return rewriteValueAMD64_OpAMD64SUBLconst(v, config) 275 case OpAMD64SUBQ: 276 return rewriteValueAMD64_OpAMD64SUBQ(v, config) 277 case OpAMD64SUBQconst: 278 return rewriteValueAMD64_OpAMD64SUBQconst(v, config) 279 case OpAMD64XADDLlock: 280 return rewriteValueAMD64_OpAMD64XADDLlock(v, config) 281 case OpAMD64XADDQlock: 282 return rewriteValueAMD64_OpAMD64XADDQlock(v, config) 283 case OpAMD64XCHGL: 284 return rewriteValueAMD64_OpAMD64XCHGL(v, config) 285 case OpAMD64XCHGQ: 286 return rewriteValueAMD64_OpAMD64XCHGQ(v, config) 287 case OpAMD64XORL: 288 return rewriteValueAMD64_OpAMD64XORL(v, config) 289 case OpAMD64XORLconst: 290 return rewriteValueAMD64_OpAMD64XORLconst(v, config) 291 case OpAMD64XORQ: 292 return rewriteValueAMD64_OpAMD64XORQ(v, config) 293 case OpAMD64XORQconst: 294 return rewriteValueAMD64_OpAMD64XORQconst(v, config) 295 case OpAdd16: 296 return rewriteValueAMD64_OpAdd16(v, config) 297 case OpAdd32: 298 return rewriteValueAMD64_OpAdd32(v, config) 299 case OpAdd32F: 300 return rewriteValueAMD64_OpAdd32F(v, config) 301 case OpAdd64: 302 return rewriteValueAMD64_OpAdd64(v, config) 303 case OpAdd64F: 304 return rewriteValueAMD64_OpAdd64F(v, config) 305 case OpAdd8: 306 return rewriteValueAMD64_OpAdd8(v, config) 307 case OpAddPtr: 308 return rewriteValueAMD64_OpAddPtr(v, config) 309 case OpAddr: 310 return rewriteValueAMD64_OpAddr(v, config) 311 case OpAnd16: 312 return rewriteValueAMD64_OpAnd16(v, config) 313 case OpAnd32: 314 return rewriteValueAMD64_OpAnd32(v, config) 315 case OpAnd64: 316 return rewriteValueAMD64_OpAnd64(v, config) 317 case OpAnd8: 318 return rewriteValueAMD64_OpAnd8(v, config) 319 case OpAndB: 320 return rewriteValueAMD64_OpAndB(v, config) 321 case OpAtomicAdd32: 322 return rewriteValueAMD64_OpAtomicAdd32(v, config) 323 case OpAtomicAdd64: 324 return rewriteValueAMD64_OpAtomicAdd64(v, config) 325 case OpAtomicAnd8: 326 return rewriteValueAMD64_OpAtomicAnd8(v, config) 327 case OpAtomicCompareAndSwap32: 328 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config) 329 case OpAtomicCompareAndSwap64: 330 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config) 331 case OpAtomicExchange32: 332 return rewriteValueAMD64_OpAtomicExchange32(v, config) 333 case OpAtomicExchange64: 334 return rewriteValueAMD64_OpAtomicExchange64(v, config) 335 case OpAtomicLoad32: 336 return rewriteValueAMD64_OpAtomicLoad32(v, config) 337 case OpAtomicLoad64: 338 return rewriteValueAMD64_OpAtomicLoad64(v, config) 339 case OpAtomicLoadPtr: 340 return rewriteValueAMD64_OpAtomicLoadPtr(v, config) 341 case OpAtomicOr8: 342 return rewriteValueAMD64_OpAtomicOr8(v, config) 343 case OpAtomicStore32: 344 return rewriteValueAMD64_OpAtomicStore32(v, config) 345 case OpAtomicStore64: 346 return rewriteValueAMD64_OpAtomicStore64(v, config) 347 case OpAtomicStorePtrNoWB: 348 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config) 349 case 
OpAvg64u: 350 return rewriteValueAMD64_OpAvg64u(v, config) 351 case OpBswap32: 352 return rewriteValueAMD64_OpBswap32(v, config) 353 case OpBswap64: 354 return rewriteValueAMD64_OpBswap64(v, config) 355 case OpClosureCall: 356 return rewriteValueAMD64_OpClosureCall(v, config) 357 case OpCom16: 358 return rewriteValueAMD64_OpCom16(v, config) 359 case OpCom32: 360 return rewriteValueAMD64_OpCom32(v, config) 361 case OpCom64: 362 return rewriteValueAMD64_OpCom64(v, config) 363 case OpCom8: 364 return rewriteValueAMD64_OpCom8(v, config) 365 case OpConst16: 366 return rewriteValueAMD64_OpConst16(v, config) 367 case OpConst32: 368 return rewriteValueAMD64_OpConst32(v, config) 369 case OpConst32F: 370 return rewriteValueAMD64_OpConst32F(v, config) 371 case OpConst64: 372 return rewriteValueAMD64_OpConst64(v, config) 373 case OpConst64F: 374 return rewriteValueAMD64_OpConst64F(v, config) 375 case OpConst8: 376 return rewriteValueAMD64_OpConst8(v, config) 377 case OpConstBool: 378 return rewriteValueAMD64_OpConstBool(v, config) 379 case OpConstNil: 380 return rewriteValueAMD64_OpConstNil(v, config) 381 case OpConvert: 382 return rewriteValueAMD64_OpConvert(v, config) 383 case OpCtz32: 384 return rewriteValueAMD64_OpCtz32(v, config) 385 case OpCtz64: 386 return rewriteValueAMD64_OpCtz64(v, config) 387 case OpCvt32Fto32: 388 return rewriteValueAMD64_OpCvt32Fto32(v, config) 389 case OpCvt32Fto64: 390 return rewriteValueAMD64_OpCvt32Fto64(v, config) 391 case OpCvt32Fto64F: 392 return rewriteValueAMD64_OpCvt32Fto64F(v, config) 393 case OpCvt32to32F: 394 return rewriteValueAMD64_OpCvt32to32F(v, config) 395 case OpCvt32to64F: 396 return rewriteValueAMD64_OpCvt32to64F(v, config) 397 case OpCvt64Fto32: 398 return rewriteValueAMD64_OpCvt64Fto32(v, config) 399 case OpCvt64Fto32F: 400 return rewriteValueAMD64_OpCvt64Fto32F(v, config) 401 case OpCvt64Fto64: 402 return rewriteValueAMD64_OpCvt64Fto64(v, config) 403 case OpCvt64to32F: 404 return rewriteValueAMD64_OpCvt64to32F(v, config) 405 case OpCvt64to64F: 406 return rewriteValueAMD64_OpCvt64to64F(v, config) 407 case OpDeferCall: 408 return rewriteValueAMD64_OpDeferCall(v, config) 409 case OpDiv128u: 410 return rewriteValueAMD64_OpDiv128u(v, config) 411 case OpDiv16: 412 return rewriteValueAMD64_OpDiv16(v, config) 413 case OpDiv16u: 414 return rewriteValueAMD64_OpDiv16u(v, config) 415 case OpDiv32: 416 return rewriteValueAMD64_OpDiv32(v, config) 417 case OpDiv32F: 418 return rewriteValueAMD64_OpDiv32F(v, config) 419 case OpDiv32u: 420 return rewriteValueAMD64_OpDiv32u(v, config) 421 case OpDiv64: 422 return rewriteValueAMD64_OpDiv64(v, config) 423 case OpDiv64F: 424 return rewriteValueAMD64_OpDiv64F(v, config) 425 case OpDiv64u: 426 return rewriteValueAMD64_OpDiv64u(v, config) 427 case OpDiv8: 428 return rewriteValueAMD64_OpDiv8(v, config) 429 case OpDiv8u: 430 return rewriteValueAMD64_OpDiv8u(v, config) 431 case OpEq16: 432 return rewriteValueAMD64_OpEq16(v, config) 433 case OpEq32: 434 return rewriteValueAMD64_OpEq32(v, config) 435 case OpEq32F: 436 return rewriteValueAMD64_OpEq32F(v, config) 437 case OpEq64: 438 return rewriteValueAMD64_OpEq64(v, config) 439 case OpEq64F: 440 return rewriteValueAMD64_OpEq64F(v, config) 441 case OpEq8: 442 return rewriteValueAMD64_OpEq8(v, config) 443 case OpEqB: 444 return rewriteValueAMD64_OpEqB(v, config) 445 case OpEqPtr: 446 return rewriteValueAMD64_OpEqPtr(v, config) 447 case OpGeq16: 448 return rewriteValueAMD64_OpGeq16(v, config) 449 case OpGeq16U: 450 return rewriteValueAMD64_OpGeq16U(v, config) 451 case OpGeq32: 452 
return rewriteValueAMD64_OpGeq32(v, config) 453 case OpGeq32F: 454 return rewriteValueAMD64_OpGeq32F(v, config) 455 case OpGeq32U: 456 return rewriteValueAMD64_OpGeq32U(v, config) 457 case OpGeq64: 458 return rewriteValueAMD64_OpGeq64(v, config) 459 case OpGeq64F: 460 return rewriteValueAMD64_OpGeq64F(v, config) 461 case OpGeq64U: 462 return rewriteValueAMD64_OpGeq64U(v, config) 463 case OpGeq8: 464 return rewriteValueAMD64_OpGeq8(v, config) 465 case OpGeq8U: 466 return rewriteValueAMD64_OpGeq8U(v, config) 467 case OpGetClosurePtr: 468 return rewriteValueAMD64_OpGetClosurePtr(v, config) 469 case OpGetG: 470 return rewriteValueAMD64_OpGetG(v, config) 471 case OpGoCall: 472 return rewriteValueAMD64_OpGoCall(v, config) 473 case OpGreater16: 474 return rewriteValueAMD64_OpGreater16(v, config) 475 case OpGreater16U: 476 return rewriteValueAMD64_OpGreater16U(v, config) 477 case OpGreater32: 478 return rewriteValueAMD64_OpGreater32(v, config) 479 case OpGreater32F: 480 return rewriteValueAMD64_OpGreater32F(v, config) 481 case OpGreater32U: 482 return rewriteValueAMD64_OpGreater32U(v, config) 483 case OpGreater64: 484 return rewriteValueAMD64_OpGreater64(v, config) 485 case OpGreater64F: 486 return rewriteValueAMD64_OpGreater64F(v, config) 487 case OpGreater64U: 488 return rewriteValueAMD64_OpGreater64U(v, config) 489 case OpGreater8: 490 return rewriteValueAMD64_OpGreater8(v, config) 491 case OpGreater8U: 492 return rewriteValueAMD64_OpGreater8U(v, config) 493 case OpHmul16: 494 return rewriteValueAMD64_OpHmul16(v, config) 495 case OpHmul16u: 496 return rewriteValueAMD64_OpHmul16u(v, config) 497 case OpHmul32: 498 return rewriteValueAMD64_OpHmul32(v, config) 499 case OpHmul32u: 500 return rewriteValueAMD64_OpHmul32u(v, config) 501 case OpHmul64: 502 return rewriteValueAMD64_OpHmul64(v, config) 503 case OpHmul64u: 504 return rewriteValueAMD64_OpHmul64u(v, config) 505 case OpHmul8: 506 return rewriteValueAMD64_OpHmul8(v, config) 507 case OpHmul8u: 508 return rewriteValueAMD64_OpHmul8u(v, config) 509 case OpInt64Hi: 510 return rewriteValueAMD64_OpInt64Hi(v, config) 511 case OpInterCall: 512 return rewriteValueAMD64_OpInterCall(v, config) 513 case OpIsInBounds: 514 return rewriteValueAMD64_OpIsInBounds(v, config) 515 case OpIsNonNil: 516 return rewriteValueAMD64_OpIsNonNil(v, config) 517 case OpIsSliceInBounds: 518 return rewriteValueAMD64_OpIsSliceInBounds(v, config) 519 case OpLeq16: 520 return rewriteValueAMD64_OpLeq16(v, config) 521 case OpLeq16U: 522 return rewriteValueAMD64_OpLeq16U(v, config) 523 case OpLeq32: 524 return rewriteValueAMD64_OpLeq32(v, config) 525 case OpLeq32F: 526 return rewriteValueAMD64_OpLeq32F(v, config) 527 case OpLeq32U: 528 return rewriteValueAMD64_OpLeq32U(v, config) 529 case OpLeq64: 530 return rewriteValueAMD64_OpLeq64(v, config) 531 case OpLeq64F: 532 return rewriteValueAMD64_OpLeq64F(v, config) 533 case OpLeq64U: 534 return rewriteValueAMD64_OpLeq64U(v, config) 535 case OpLeq8: 536 return rewriteValueAMD64_OpLeq8(v, config) 537 case OpLeq8U: 538 return rewriteValueAMD64_OpLeq8U(v, config) 539 case OpLess16: 540 return rewriteValueAMD64_OpLess16(v, config) 541 case OpLess16U: 542 return rewriteValueAMD64_OpLess16U(v, config) 543 case OpLess32: 544 return rewriteValueAMD64_OpLess32(v, config) 545 case OpLess32F: 546 return rewriteValueAMD64_OpLess32F(v, config) 547 case OpLess32U: 548 return rewriteValueAMD64_OpLess32U(v, config) 549 case OpLess64: 550 return rewriteValueAMD64_OpLess64(v, config) 551 case OpLess64F: 552 return rewriteValueAMD64_OpLess64F(v, config) 
553 case OpLess64U: 554 return rewriteValueAMD64_OpLess64U(v, config) 555 case OpLess8: 556 return rewriteValueAMD64_OpLess8(v, config) 557 case OpLess8U: 558 return rewriteValueAMD64_OpLess8U(v, config) 559 case OpLoad: 560 return rewriteValueAMD64_OpLoad(v, config) 561 case OpLsh16x16: 562 return rewriteValueAMD64_OpLsh16x16(v, config) 563 case OpLsh16x32: 564 return rewriteValueAMD64_OpLsh16x32(v, config) 565 case OpLsh16x64: 566 return rewriteValueAMD64_OpLsh16x64(v, config) 567 case OpLsh16x8: 568 return rewriteValueAMD64_OpLsh16x8(v, config) 569 case OpLsh32x16: 570 return rewriteValueAMD64_OpLsh32x16(v, config) 571 case OpLsh32x32: 572 return rewriteValueAMD64_OpLsh32x32(v, config) 573 case OpLsh32x64: 574 return rewriteValueAMD64_OpLsh32x64(v, config) 575 case OpLsh32x8: 576 return rewriteValueAMD64_OpLsh32x8(v, config) 577 case OpLsh64x16: 578 return rewriteValueAMD64_OpLsh64x16(v, config) 579 case OpLsh64x32: 580 return rewriteValueAMD64_OpLsh64x32(v, config) 581 case OpLsh64x64: 582 return rewriteValueAMD64_OpLsh64x64(v, config) 583 case OpLsh64x8: 584 return rewriteValueAMD64_OpLsh64x8(v, config) 585 case OpLsh8x16: 586 return rewriteValueAMD64_OpLsh8x16(v, config) 587 case OpLsh8x32: 588 return rewriteValueAMD64_OpLsh8x32(v, config) 589 case OpLsh8x64: 590 return rewriteValueAMD64_OpLsh8x64(v, config) 591 case OpLsh8x8: 592 return rewriteValueAMD64_OpLsh8x8(v, config) 593 case OpMod16: 594 return rewriteValueAMD64_OpMod16(v, config) 595 case OpMod16u: 596 return rewriteValueAMD64_OpMod16u(v, config) 597 case OpMod32: 598 return rewriteValueAMD64_OpMod32(v, config) 599 case OpMod32u: 600 return rewriteValueAMD64_OpMod32u(v, config) 601 case OpMod64: 602 return rewriteValueAMD64_OpMod64(v, config) 603 case OpMod64u: 604 return rewriteValueAMD64_OpMod64u(v, config) 605 case OpMod8: 606 return rewriteValueAMD64_OpMod8(v, config) 607 case OpMod8u: 608 return rewriteValueAMD64_OpMod8u(v, config) 609 case OpMove: 610 return rewriteValueAMD64_OpMove(v, config) 611 case OpMul16: 612 return rewriteValueAMD64_OpMul16(v, config) 613 case OpMul32: 614 return rewriteValueAMD64_OpMul32(v, config) 615 case OpMul32F: 616 return rewriteValueAMD64_OpMul32F(v, config) 617 case OpMul64: 618 return rewriteValueAMD64_OpMul64(v, config) 619 case OpMul64F: 620 return rewriteValueAMD64_OpMul64F(v, config) 621 case OpMul64uhilo: 622 return rewriteValueAMD64_OpMul64uhilo(v, config) 623 case OpMul8: 624 return rewriteValueAMD64_OpMul8(v, config) 625 case OpNeg16: 626 return rewriteValueAMD64_OpNeg16(v, config) 627 case OpNeg32: 628 return rewriteValueAMD64_OpNeg32(v, config) 629 case OpNeg32F: 630 return rewriteValueAMD64_OpNeg32F(v, config) 631 case OpNeg64: 632 return rewriteValueAMD64_OpNeg64(v, config) 633 case OpNeg64F: 634 return rewriteValueAMD64_OpNeg64F(v, config) 635 case OpNeg8: 636 return rewriteValueAMD64_OpNeg8(v, config) 637 case OpNeq16: 638 return rewriteValueAMD64_OpNeq16(v, config) 639 case OpNeq32: 640 return rewriteValueAMD64_OpNeq32(v, config) 641 case OpNeq32F: 642 return rewriteValueAMD64_OpNeq32F(v, config) 643 case OpNeq64: 644 return rewriteValueAMD64_OpNeq64(v, config) 645 case OpNeq64F: 646 return rewriteValueAMD64_OpNeq64F(v, config) 647 case OpNeq8: 648 return rewriteValueAMD64_OpNeq8(v, config) 649 case OpNeqB: 650 return rewriteValueAMD64_OpNeqB(v, config) 651 case OpNeqPtr: 652 return rewriteValueAMD64_OpNeqPtr(v, config) 653 case OpNilCheck: 654 return rewriteValueAMD64_OpNilCheck(v, config) 655 case OpNot: 656 return rewriteValueAMD64_OpNot(v, config) 657 case OpOffPtr: 
658 return rewriteValueAMD64_OpOffPtr(v, config) 659 case OpOr16: 660 return rewriteValueAMD64_OpOr16(v, config) 661 case OpOr32: 662 return rewriteValueAMD64_OpOr32(v, config) 663 case OpOr64: 664 return rewriteValueAMD64_OpOr64(v, config) 665 case OpOr8: 666 return rewriteValueAMD64_OpOr8(v, config) 667 case OpOrB: 668 return rewriteValueAMD64_OpOrB(v, config) 669 case OpRsh16Ux16: 670 return rewriteValueAMD64_OpRsh16Ux16(v, config) 671 case OpRsh16Ux32: 672 return rewriteValueAMD64_OpRsh16Ux32(v, config) 673 case OpRsh16Ux64: 674 return rewriteValueAMD64_OpRsh16Ux64(v, config) 675 case OpRsh16Ux8: 676 return rewriteValueAMD64_OpRsh16Ux8(v, config) 677 case OpRsh16x16: 678 return rewriteValueAMD64_OpRsh16x16(v, config) 679 case OpRsh16x32: 680 return rewriteValueAMD64_OpRsh16x32(v, config) 681 case OpRsh16x64: 682 return rewriteValueAMD64_OpRsh16x64(v, config) 683 case OpRsh16x8: 684 return rewriteValueAMD64_OpRsh16x8(v, config) 685 case OpRsh32Ux16: 686 return rewriteValueAMD64_OpRsh32Ux16(v, config) 687 case OpRsh32Ux32: 688 return rewriteValueAMD64_OpRsh32Ux32(v, config) 689 case OpRsh32Ux64: 690 return rewriteValueAMD64_OpRsh32Ux64(v, config) 691 case OpRsh32Ux8: 692 return rewriteValueAMD64_OpRsh32Ux8(v, config) 693 case OpRsh32x16: 694 return rewriteValueAMD64_OpRsh32x16(v, config) 695 case OpRsh32x32: 696 return rewriteValueAMD64_OpRsh32x32(v, config) 697 case OpRsh32x64: 698 return rewriteValueAMD64_OpRsh32x64(v, config) 699 case OpRsh32x8: 700 return rewriteValueAMD64_OpRsh32x8(v, config) 701 case OpRsh64Ux16: 702 return rewriteValueAMD64_OpRsh64Ux16(v, config) 703 case OpRsh64Ux32: 704 return rewriteValueAMD64_OpRsh64Ux32(v, config) 705 case OpRsh64Ux64: 706 return rewriteValueAMD64_OpRsh64Ux64(v, config) 707 case OpRsh64Ux8: 708 return rewriteValueAMD64_OpRsh64Ux8(v, config) 709 case OpRsh64x16: 710 return rewriteValueAMD64_OpRsh64x16(v, config) 711 case OpRsh64x32: 712 return rewriteValueAMD64_OpRsh64x32(v, config) 713 case OpRsh64x64: 714 return rewriteValueAMD64_OpRsh64x64(v, config) 715 case OpRsh64x8: 716 return rewriteValueAMD64_OpRsh64x8(v, config) 717 case OpRsh8Ux16: 718 return rewriteValueAMD64_OpRsh8Ux16(v, config) 719 case OpRsh8Ux32: 720 return rewriteValueAMD64_OpRsh8Ux32(v, config) 721 case OpRsh8Ux64: 722 return rewriteValueAMD64_OpRsh8Ux64(v, config) 723 case OpRsh8Ux8: 724 return rewriteValueAMD64_OpRsh8Ux8(v, config) 725 case OpRsh8x16: 726 return rewriteValueAMD64_OpRsh8x16(v, config) 727 case OpRsh8x32: 728 return rewriteValueAMD64_OpRsh8x32(v, config) 729 case OpRsh8x64: 730 return rewriteValueAMD64_OpRsh8x64(v, config) 731 case OpRsh8x8: 732 return rewriteValueAMD64_OpRsh8x8(v, config) 733 case OpSelect0: 734 return rewriteValueAMD64_OpSelect0(v, config) 735 case OpSelect1: 736 return rewriteValueAMD64_OpSelect1(v, config) 737 case OpSignExt16to32: 738 return rewriteValueAMD64_OpSignExt16to32(v, config) 739 case OpSignExt16to64: 740 return rewriteValueAMD64_OpSignExt16to64(v, config) 741 case OpSignExt32to64: 742 return rewriteValueAMD64_OpSignExt32to64(v, config) 743 case OpSignExt8to16: 744 return rewriteValueAMD64_OpSignExt8to16(v, config) 745 case OpSignExt8to32: 746 return rewriteValueAMD64_OpSignExt8to32(v, config) 747 case OpSignExt8to64: 748 return rewriteValueAMD64_OpSignExt8to64(v, config) 749 case OpSlicemask: 750 return rewriteValueAMD64_OpSlicemask(v, config) 751 case OpSqrt: 752 return rewriteValueAMD64_OpSqrt(v, config) 753 case OpStaticCall: 754 return rewriteValueAMD64_OpStaticCall(v, config) 755 case OpStore: 756 return 
rewriteValueAMD64_OpStore(v, config)
	case OpSub16:
		return rewriteValueAMD64_OpSub16(v, config)
	case OpSub32:
		return rewriteValueAMD64_OpSub32(v, config)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F(v, config)
	case OpSub64:
		return rewriteValueAMD64_OpSub64(v, config)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F(v, config)
	case OpSub8:
		return rewriteValueAMD64_OpSub8(v, config)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr(v, config)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8(v, config)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16(v, config)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8(v, config)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16(v, config)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32(v, config)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8(v, config)
	case OpXor16:
		return rewriteValueAMD64_OpXor16(v, config)
	case OpXor32:
		return rewriteValueAMD64_OpXor32(v, config)
	case OpXor64:
		return rewriteValueAMD64_OpXor64(v, config)
	case OpXor8:
		return rewriteValueAMD64_OpXor8(v, config)
	case OpZero:
		return rewriteValueAMD64_OpZero(v, config)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32(v, config)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64(v, config)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64(v, config)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16(v, config)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32(v, config)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64(v, config)
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [32-c]))
	// cond:
	// result: (ROLLconst x [ c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		if v_1.AuxInt != 32-c {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [c]) (SHLLconst x [32-c]))
	// cond:
	// result: (ROLLconst x [32-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 32-c {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = 32 - c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [16-c]))
	// cond: c < 16 && t.Size() == 2
	// result: (ROLWconst x [ c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		if v_1.AuxInt != 16-c {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if !(c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [c]) (SHLLconst x [16-c]))
	// cond: c > 0 && t.Size() == 2
	// result: (ROLWconst x [16-c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 16-c {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if !(c > 0 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = 16 - c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c]))
	// cond: c < 8 && t.Size() == 1
	// result: (ROLBconst x [ c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		if v_1.AuxInt != 8-c {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if !(c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c]))
	// cond: c > 0 && t.Size() == 1
	// result: (ROLBconst x [ 8-c])
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 8-c {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		if !(c > 0 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = 8 - c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [64-c]))
	// cond:
	// result: (ROLQconst x [ c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 64-c {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [c]) (SHLQconst x [64-c]))
	// cond:
	// result: (ROLQconst x [64-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 64-c {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = 64 - c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
1250 v.reset(OpAMD64LEAQ2) 1251 v.AddArg(x) 1252 v.AddArg(y) 1253 return true 1254 } 1255 // match: (ADDQ x (ADDQ x y)) 1256 // cond: 1257 // result: (LEAQ2 y x) 1258 for { 1259 x := v.Args[0] 1260 v_1 := v.Args[1] 1261 if v_1.Op != OpAMD64ADDQ { 1262 break 1263 } 1264 if x != v_1.Args[0] { 1265 break 1266 } 1267 y := v_1.Args[1] 1268 v.reset(OpAMD64LEAQ2) 1269 v.AddArg(y) 1270 v.AddArg(x) 1271 return true 1272 } 1273 // match: (ADDQ x (ADDQ y x)) 1274 // cond: 1275 // result: (LEAQ2 y x) 1276 for { 1277 x := v.Args[0] 1278 v_1 := v.Args[1] 1279 if v_1.Op != OpAMD64ADDQ { 1280 break 1281 } 1282 y := v_1.Args[0] 1283 if x != v_1.Args[1] { 1284 break 1285 } 1286 v.reset(OpAMD64LEAQ2) 1287 v.AddArg(y) 1288 v.AddArg(x) 1289 return true 1290 } 1291 // match: (ADDQ (ADDQconst [c] x) y) 1292 // cond: 1293 // result: (LEAQ1 [c] x y) 1294 for { 1295 v_0 := v.Args[0] 1296 if v_0.Op != OpAMD64ADDQconst { 1297 break 1298 } 1299 c := v_0.AuxInt 1300 x := v_0.Args[0] 1301 y := v.Args[1] 1302 v.reset(OpAMD64LEAQ1) 1303 v.AuxInt = c 1304 v.AddArg(x) 1305 v.AddArg(y) 1306 return true 1307 } 1308 // match: (ADDQ x (ADDQconst [c] y)) 1309 // cond: 1310 // result: (LEAQ1 [c] x y) 1311 for { 1312 x := v.Args[0] 1313 v_1 := v.Args[1] 1314 if v_1.Op != OpAMD64ADDQconst { 1315 break 1316 } 1317 c := v_1.AuxInt 1318 y := v_1.Args[0] 1319 v.reset(OpAMD64LEAQ1) 1320 v.AuxInt = c 1321 v.AddArg(x) 1322 v.AddArg(y) 1323 return true 1324 } 1325 // match: (ADDQ x (LEAQ [c] {s} y)) 1326 // cond: x.Op != OpSB && y.Op != OpSB 1327 // result: (LEAQ1 [c] {s} x y) 1328 for { 1329 x := v.Args[0] 1330 v_1 := v.Args[1] 1331 if v_1.Op != OpAMD64LEAQ { 1332 break 1333 } 1334 c := v_1.AuxInt 1335 s := v_1.Aux 1336 y := v_1.Args[0] 1337 if !(x.Op != OpSB && y.Op != OpSB) { 1338 break 1339 } 1340 v.reset(OpAMD64LEAQ1) 1341 v.AuxInt = c 1342 v.Aux = s 1343 v.AddArg(x) 1344 v.AddArg(y) 1345 return true 1346 } 1347 // match: (ADDQ (LEAQ [c] {s} x) y) 1348 // cond: x.Op != OpSB && y.Op != OpSB 1349 // result: (LEAQ1 [c] {s} x y) 1350 for { 1351 v_0 := v.Args[0] 1352 if v_0.Op != OpAMD64LEAQ { 1353 break 1354 } 1355 c := v_0.AuxInt 1356 s := v_0.Aux 1357 x := v_0.Args[0] 1358 y := v.Args[1] 1359 if !(x.Op != OpSB && y.Op != OpSB) { 1360 break 1361 } 1362 v.reset(OpAMD64LEAQ1) 1363 v.AuxInt = c 1364 v.Aux = s 1365 v.AddArg(x) 1366 v.AddArg(y) 1367 return true 1368 } 1369 // match: (ADDQ x (NEGQ y)) 1370 // cond: 1371 // result: (SUBQ x y) 1372 for { 1373 x := v.Args[0] 1374 v_1 := v.Args[1] 1375 if v_1.Op != OpAMD64NEGQ { 1376 break 1377 } 1378 y := v_1.Args[0] 1379 v.reset(OpAMD64SUBQ) 1380 v.AddArg(x) 1381 v.AddArg(y) 1382 return true 1383 } 1384 return false 1385 } 1386 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool { 1387 b := v.Block 1388 _ = b 1389 // match: (ADDQconst [c] (ADDQ x y)) 1390 // cond: 1391 // result: (LEAQ1 [c] x y) 1392 for { 1393 c := v.AuxInt 1394 v_0 := v.Args[0] 1395 if v_0.Op != OpAMD64ADDQ { 1396 break 1397 } 1398 x := v_0.Args[0] 1399 y := v_0.Args[1] 1400 v.reset(OpAMD64LEAQ1) 1401 v.AuxInt = c 1402 v.AddArg(x) 1403 v.AddArg(y) 1404 return true 1405 } 1406 // match: (ADDQconst [c] (LEAQ [d] {s} x)) 1407 // cond: is32Bit(c+d) 1408 // result: (LEAQ [c+d] {s} x) 1409 for { 1410 c := v.AuxInt 1411 v_0 := v.Args[0] 1412 if v_0.Op != OpAMD64LEAQ { 1413 break 1414 } 1415 d := v_0.AuxInt 1416 s := v_0.Aux 1417 x := v_0.Args[0] 1418 if !(is32Bit(c + d)) { 1419 break 1420 } 1421 v.reset(OpAMD64LEAQ) 1422 v.AuxInt = c + d 1423 v.Aux = s 1424 v.AddArg(x) 1425 return true 1426 } 1427 // match: 
(ADDQconst [c] (LEAQ1 [d] {s} x y)) 1428 // cond: is32Bit(c+d) 1429 // result: (LEAQ1 [c+d] {s} x y) 1430 for { 1431 c := v.AuxInt 1432 v_0 := v.Args[0] 1433 if v_0.Op != OpAMD64LEAQ1 { 1434 break 1435 } 1436 d := v_0.AuxInt 1437 s := v_0.Aux 1438 x := v_0.Args[0] 1439 y := v_0.Args[1] 1440 if !(is32Bit(c + d)) { 1441 break 1442 } 1443 v.reset(OpAMD64LEAQ1) 1444 v.AuxInt = c + d 1445 v.Aux = s 1446 v.AddArg(x) 1447 v.AddArg(y) 1448 return true 1449 } 1450 // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) 1451 // cond: is32Bit(c+d) 1452 // result: (LEAQ2 [c+d] {s} x y) 1453 for { 1454 c := v.AuxInt 1455 v_0 := v.Args[0] 1456 if v_0.Op != OpAMD64LEAQ2 { 1457 break 1458 } 1459 d := v_0.AuxInt 1460 s := v_0.Aux 1461 x := v_0.Args[0] 1462 y := v_0.Args[1] 1463 if !(is32Bit(c + d)) { 1464 break 1465 } 1466 v.reset(OpAMD64LEAQ2) 1467 v.AuxInt = c + d 1468 v.Aux = s 1469 v.AddArg(x) 1470 v.AddArg(y) 1471 return true 1472 } 1473 // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) 1474 // cond: is32Bit(c+d) 1475 // result: (LEAQ4 [c+d] {s} x y) 1476 for { 1477 c := v.AuxInt 1478 v_0 := v.Args[0] 1479 if v_0.Op != OpAMD64LEAQ4 { 1480 break 1481 } 1482 d := v_0.AuxInt 1483 s := v_0.Aux 1484 x := v_0.Args[0] 1485 y := v_0.Args[1] 1486 if !(is32Bit(c + d)) { 1487 break 1488 } 1489 v.reset(OpAMD64LEAQ4) 1490 v.AuxInt = c + d 1491 v.Aux = s 1492 v.AddArg(x) 1493 v.AddArg(y) 1494 return true 1495 } 1496 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) 1497 // cond: is32Bit(c+d) 1498 // result: (LEAQ8 [c+d] {s} x y) 1499 for { 1500 c := v.AuxInt 1501 v_0 := v.Args[0] 1502 if v_0.Op != OpAMD64LEAQ8 { 1503 break 1504 } 1505 d := v_0.AuxInt 1506 s := v_0.Aux 1507 x := v_0.Args[0] 1508 y := v_0.Args[1] 1509 if !(is32Bit(c + d)) { 1510 break 1511 } 1512 v.reset(OpAMD64LEAQ8) 1513 v.AuxInt = c + d 1514 v.Aux = s 1515 v.AddArg(x) 1516 v.AddArg(y) 1517 return true 1518 } 1519 // match: (ADDQconst [0] x) 1520 // cond: 1521 // result: x 1522 for { 1523 if v.AuxInt != 0 { 1524 break 1525 } 1526 x := v.Args[0] 1527 v.reset(OpCopy) 1528 v.Type = x.Type 1529 v.AddArg(x) 1530 return true 1531 } 1532 // match: (ADDQconst [c] (MOVQconst [d])) 1533 // cond: 1534 // result: (MOVQconst [c+d]) 1535 for { 1536 c := v.AuxInt 1537 v_0 := v.Args[0] 1538 if v_0.Op != OpAMD64MOVQconst { 1539 break 1540 } 1541 d := v_0.AuxInt 1542 v.reset(OpAMD64MOVQconst) 1543 v.AuxInt = c + d 1544 return true 1545 } 1546 // match: (ADDQconst [c] (ADDQconst [d] x)) 1547 // cond: is32Bit(c+d) 1548 // result: (ADDQconst [c+d] x) 1549 for { 1550 c := v.AuxInt 1551 v_0 := v.Args[0] 1552 if v_0.Op != OpAMD64ADDQconst { 1553 break 1554 } 1555 d := v_0.AuxInt 1556 x := v_0.Args[0] 1557 if !(is32Bit(c + d)) { 1558 break 1559 } 1560 v.reset(OpAMD64ADDQconst) 1561 v.AuxInt = c + d 1562 v.AddArg(x) 1563 return true 1564 } 1565 return false 1566 } 1567 func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool { 1568 b := v.Block 1569 _ = b 1570 // match: (ANDL x (MOVLconst [c])) 1571 // cond: 1572 // result: (ANDLconst [c] x) 1573 for { 1574 x := v.Args[0] 1575 v_1 := v.Args[1] 1576 if v_1.Op != OpAMD64MOVLconst { 1577 break 1578 } 1579 c := v_1.AuxInt 1580 v.reset(OpAMD64ANDLconst) 1581 v.AuxInt = c 1582 v.AddArg(x) 1583 return true 1584 } 1585 // match: (ANDL (MOVLconst [c]) x) 1586 // cond: 1587 // result: (ANDLconst [c] x) 1588 for { 1589 v_0 := v.Args[0] 1590 if v_0.Op != OpAMD64MOVLconst { 1591 break 1592 } 1593 c := v_0.AuxInt 1594 x := v.Args[1] 1595 v.reset(OpAMD64ANDLconst) 1596 v.AuxInt = c 1597 v.AddArg(x) 1598 return true 1599 } 1600 // match: (ANDL x x) 
1601 // cond: 1602 // result: x 1603 for { 1604 x := v.Args[0] 1605 if x != v.Args[1] { 1606 break 1607 } 1608 v.reset(OpCopy) 1609 v.Type = x.Type 1610 v.AddArg(x) 1611 return true 1612 } 1613 return false 1614 } 1615 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool { 1616 b := v.Block 1617 _ = b 1618 // match: (ANDLconst [c] (ANDLconst [d] x)) 1619 // cond: 1620 // result: (ANDLconst [c & d] x) 1621 for { 1622 c := v.AuxInt 1623 v_0 := v.Args[0] 1624 if v_0.Op != OpAMD64ANDLconst { 1625 break 1626 } 1627 d := v_0.AuxInt 1628 x := v_0.Args[0] 1629 v.reset(OpAMD64ANDLconst) 1630 v.AuxInt = c & d 1631 v.AddArg(x) 1632 return true 1633 } 1634 // match: (ANDLconst [0xFF] x) 1635 // cond: 1636 // result: (MOVBQZX x) 1637 for { 1638 if v.AuxInt != 0xFF { 1639 break 1640 } 1641 x := v.Args[0] 1642 v.reset(OpAMD64MOVBQZX) 1643 v.AddArg(x) 1644 return true 1645 } 1646 // match: (ANDLconst [0xFFFF] x) 1647 // cond: 1648 // result: (MOVWQZX x) 1649 for { 1650 if v.AuxInt != 0xFFFF { 1651 break 1652 } 1653 x := v.Args[0] 1654 v.reset(OpAMD64MOVWQZX) 1655 v.AddArg(x) 1656 return true 1657 } 1658 // match: (ANDLconst [c] _) 1659 // cond: int32(c)==0 1660 // result: (MOVLconst [0]) 1661 for { 1662 c := v.AuxInt 1663 if !(int32(c) == 0) { 1664 break 1665 } 1666 v.reset(OpAMD64MOVLconst) 1667 v.AuxInt = 0 1668 return true 1669 } 1670 // match: (ANDLconst [c] x) 1671 // cond: int32(c)==-1 1672 // result: x 1673 for { 1674 c := v.AuxInt 1675 x := v.Args[0] 1676 if !(int32(c) == -1) { 1677 break 1678 } 1679 v.reset(OpCopy) 1680 v.Type = x.Type 1681 v.AddArg(x) 1682 return true 1683 } 1684 // match: (ANDLconst [c] (MOVLconst [d])) 1685 // cond: 1686 // result: (MOVLconst [c&d]) 1687 for { 1688 c := v.AuxInt 1689 v_0 := v.Args[0] 1690 if v_0.Op != OpAMD64MOVLconst { 1691 break 1692 } 1693 d := v_0.AuxInt 1694 v.reset(OpAMD64MOVLconst) 1695 v.AuxInt = c & d 1696 return true 1697 } 1698 return false 1699 } 1700 func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool { 1701 b := v.Block 1702 _ = b 1703 // match: (ANDQ x (MOVQconst [c])) 1704 // cond: is32Bit(c) 1705 // result: (ANDQconst [c] x) 1706 for { 1707 x := v.Args[0] 1708 v_1 := v.Args[1] 1709 if v_1.Op != OpAMD64MOVQconst { 1710 break 1711 } 1712 c := v_1.AuxInt 1713 if !(is32Bit(c)) { 1714 break 1715 } 1716 v.reset(OpAMD64ANDQconst) 1717 v.AuxInt = c 1718 v.AddArg(x) 1719 return true 1720 } 1721 // match: (ANDQ (MOVQconst [c]) x) 1722 // cond: is32Bit(c) 1723 // result: (ANDQconst [c] x) 1724 for { 1725 v_0 := v.Args[0] 1726 if v_0.Op != OpAMD64MOVQconst { 1727 break 1728 } 1729 c := v_0.AuxInt 1730 x := v.Args[1] 1731 if !(is32Bit(c)) { 1732 break 1733 } 1734 v.reset(OpAMD64ANDQconst) 1735 v.AuxInt = c 1736 v.AddArg(x) 1737 return true 1738 } 1739 // match: (ANDQ x x) 1740 // cond: 1741 // result: x 1742 for { 1743 x := v.Args[0] 1744 if x != v.Args[1] { 1745 break 1746 } 1747 v.reset(OpCopy) 1748 v.Type = x.Type 1749 v.AddArg(x) 1750 return true 1751 } 1752 return false 1753 } 1754 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { 1755 b := v.Block 1756 _ = b 1757 // match: (ANDQconst [c] (ANDQconst [d] x)) 1758 // cond: 1759 // result: (ANDQconst [c & d] x) 1760 for { 1761 c := v.AuxInt 1762 v_0 := v.Args[0] 1763 if v_0.Op != OpAMD64ANDQconst { 1764 break 1765 } 1766 d := v_0.AuxInt 1767 x := v_0.Args[0] 1768 v.reset(OpAMD64ANDQconst) 1769 v.AuxInt = c & d 1770 v.AddArg(x) 1771 return true 1772 } 1773 // match: (ANDQconst [0xFF] x) 1774 // cond: 1775 // result: (MOVBQZX x) 1776 for { 1777 if 
v.AuxInt != 0xFF { 1778 break 1779 } 1780 x := v.Args[0] 1781 v.reset(OpAMD64MOVBQZX) 1782 v.AddArg(x) 1783 return true 1784 } 1785 // match: (ANDQconst [0xFFFF] x) 1786 // cond: 1787 // result: (MOVWQZX x) 1788 for { 1789 if v.AuxInt != 0xFFFF { 1790 break 1791 } 1792 x := v.Args[0] 1793 v.reset(OpAMD64MOVWQZX) 1794 v.AddArg(x) 1795 return true 1796 } 1797 // match: (ANDQconst [0xFFFFFFFF] x) 1798 // cond: 1799 // result: (MOVLQZX x) 1800 for { 1801 if v.AuxInt != 0xFFFFFFFF { 1802 break 1803 } 1804 x := v.Args[0] 1805 v.reset(OpAMD64MOVLQZX) 1806 v.AddArg(x) 1807 return true 1808 } 1809 // match: (ANDQconst [0] _) 1810 // cond: 1811 // result: (MOVQconst [0]) 1812 for { 1813 if v.AuxInt != 0 { 1814 break 1815 } 1816 v.reset(OpAMD64MOVQconst) 1817 v.AuxInt = 0 1818 return true 1819 } 1820 // match: (ANDQconst [-1] x) 1821 // cond: 1822 // result: x 1823 for { 1824 if v.AuxInt != -1 { 1825 break 1826 } 1827 x := v.Args[0] 1828 v.reset(OpCopy) 1829 v.Type = x.Type 1830 v.AddArg(x) 1831 return true 1832 } 1833 // match: (ANDQconst [c] (MOVQconst [d])) 1834 // cond: 1835 // result: (MOVQconst [c&d]) 1836 for { 1837 c := v.AuxInt 1838 v_0 := v.Args[0] 1839 if v_0.Op != OpAMD64MOVQconst { 1840 break 1841 } 1842 d := v_0.AuxInt 1843 v.reset(OpAMD64MOVQconst) 1844 v.AuxInt = c & d 1845 return true 1846 } 1847 return false 1848 } 1849 func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { 1850 b := v.Block 1851 _ = b 1852 // match: (CMPB x (MOVLconst [c])) 1853 // cond: 1854 // result: (CMPBconst x [int64(int8(c))]) 1855 for { 1856 x := v.Args[0] 1857 v_1 := v.Args[1] 1858 if v_1.Op != OpAMD64MOVLconst { 1859 break 1860 } 1861 c := v_1.AuxInt 1862 v.reset(OpAMD64CMPBconst) 1863 v.AuxInt = int64(int8(c)) 1864 v.AddArg(x) 1865 return true 1866 } 1867 // match: (CMPB (MOVLconst [c]) x) 1868 // cond: 1869 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 1870 for { 1871 v_0 := v.Args[0] 1872 if v_0.Op != OpAMD64MOVLconst { 1873 break 1874 } 1875 c := v_0.AuxInt 1876 x := v.Args[1] 1877 v.reset(OpAMD64InvertFlags) 1878 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 1879 v0.AuxInt = int64(int8(c)) 1880 v0.AddArg(x) 1881 v.AddArg(v0) 1882 return true 1883 } 1884 return false 1885 } 1886 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { 1887 b := v.Block 1888 _ = b 1889 // match: (CMPBconst (MOVLconst [x]) [y]) 1890 // cond: int8(x)==int8(y) 1891 // result: (FlagEQ) 1892 for { 1893 y := v.AuxInt 1894 v_0 := v.Args[0] 1895 if v_0.Op != OpAMD64MOVLconst { 1896 break 1897 } 1898 x := v_0.AuxInt 1899 if !(int8(x) == int8(y)) { 1900 break 1901 } 1902 v.reset(OpAMD64FlagEQ) 1903 return true 1904 } 1905 // match: (CMPBconst (MOVLconst [x]) [y]) 1906 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 1907 // result: (FlagLT_ULT) 1908 for { 1909 y := v.AuxInt 1910 v_0 := v.Args[0] 1911 if v_0.Op != OpAMD64MOVLconst { 1912 break 1913 } 1914 x := v_0.AuxInt 1915 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 1916 break 1917 } 1918 v.reset(OpAMD64FlagLT_ULT) 1919 return true 1920 } 1921 // match: (CMPBconst (MOVLconst [x]) [y]) 1922 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 1923 // result: (FlagLT_UGT) 1924 for { 1925 y := v.AuxInt 1926 v_0 := v.Args[0] 1927 if v_0.Op != OpAMD64MOVLconst { 1928 break 1929 } 1930 x := v_0.AuxInt 1931 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 1932 break 1933 } 1934 v.reset(OpAMD64FlagLT_UGT) 1935 return true 1936 } 1937 // match: (CMPBconst (MOVLconst [x]) [y]) 1938 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 1939 // result: 
(FlagGT_ULT) 1940 for { 1941 y := v.AuxInt 1942 v_0 := v.Args[0] 1943 if v_0.Op != OpAMD64MOVLconst { 1944 break 1945 } 1946 x := v_0.AuxInt 1947 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 1948 break 1949 } 1950 v.reset(OpAMD64FlagGT_ULT) 1951 return true 1952 } 1953 // match: (CMPBconst (MOVLconst [x]) [y]) 1954 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 1955 // result: (FlagGT_UGT) 1956 for { 1957 y := v.AuxInt 1958 v_0 := v.Args[0] 1959 if v_0.Op != OpAMD64MOVLconst { 1960 break 1961 } 1962 x := v_0.AuxInt 1963 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 1964 break 1965 } 1966 v.reset(OpAMD64FlagGT_UGT) 1967 return true 1968 } 1969 // match: (CMPBconst (ANDLconst _ [m]) [n]) 1970 // cond: 0 <= int8(m) && int8(m) < int8(n) 1971 // result: (FlagLT_ULT) 1972 for { 1973 n := v.AuxInt 1974 v_0 := v.Args[0] 1975 if v_0.Op != OpAMD64ANDLconst { 1976 break 1977 } 1978 m := v_0.AuxInt 1979 if !(0 <= int8(m) && int8(m) < int8(n)) { 1980 break 1981 } 1982 v.reset(OpAMD64FlagLT_ULT) 1983 return true 1984 } 1985 // match: (CMPBconst (ANDL x y) [0]) 1986 // cond: 1987 // result: (TESTB x y) 1988 for { 1989 if v.AuxInt != 0 { 1990 break 1991 } 1992 v_0 := v.Args[0] 1993 if v_0.Op != OpAMD64ANDL { 1994 break 1995 } 1996 x := v_0.Args[0] 1997 y := v_0.Args[1] 1998 v.reset(OpAMD64TESTB) 1999 v.AddArg(x) 2000 v.AddArg(y) 2001 return true 2002 } 2003 // match: (CMPBconst (ANDLconst [c] x) [0]) 2004 // cond: 2005 // result: (TESTBconst [int64(int8(c))] x) 2006 for { 2007 if v.AuxInt != 0 { 2008 break 2009 } 2010 v_0 := v.Args[0] 2011 if v_0.Op != OpAMD64ANDLconst { 2012 break 2013 } 2014 c := v_0.AuxInt 2015 x := v_0.Args[0] 2016 v.reset(OpAMD64TESTBconst) 2017 v.AuxInt = int64(int8(c)) 2018 v.AddArg(x) 2019 return true 2020 } 2021 // match: (CMPBconst x [0]) 2022 // cond: 2023 // result: (TESTB x x) 2024 for { 2025 if v.AuxInt != 0 { 2026 break 2027 } 2028 x := v.Args[0] 2029 v.reset(OpAMD64TESTB) 2030 v.AddArg(x) 2031 v.AddArg(x) 2032 return true 2033 } 2034 return false 2035 } 2036 func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool { 2037 b := v.Block 2038 _ = b 2039 // match: (CMPL x (MOVLconst [c])) 2040 // cond: 2041 // result: (CMPLconst x [c]) 2042 for { 2043 x := v.Args[0] 2044 v_1 := v.Args[1] 2045 if v_1.Op != OpAMD64MOVLconst { 2046 break 2047 } 2048 c := v_1.AuxInt 2049 v.reset(OpAMD64CMPLconst) 2050 v.AuxInt = c 2051 v.AddArg(x) 2052 return true 2053 } 2054 // match: (CMPL (MOVLconst [c]) x) 2055 // cond: 2056 // result: (InvertFlags (CMPLconst x [c])) 2057 for { 2058 v_0 := v.Args[0] 2059 if v_0.Op != OpAMD64MOVLconst { 2060 break 2061 } 2062 c := v_0.AuxInt 2063 x := v.Args[1] 2064 v.reset(OpAMD64InvertFlags) 2065 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 2066 v0.AuxInt = c 2067 v0.AddArg(x) 2068 v.AddArg(v0) 2069 return true 2070 } 2071 return false 2072 } 2073 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool { 2074 b := v.Block 2075 _ = b 2076 // match: (CMPLconst (MOVLconst [x]) [y]) 2077 // cond: int32(x)==int32(y) 2078 // result: (FlagEQ) 2079 for { 2080 y := v.AuxInt 2081 v_0 := v.Args[0] 2082 if v_0.Op != OpAMD64MOVLconst { 2083 break 2084 } 2085 x := v_0.AuxInt 2086 if !(int32(x) == int32(y)) { 2087 break 2088 } 2089 v.reset(OpAMD64FlagEQ) 2090 return true 2091 } 2092 // match: (CMPLconst (MOVLconst [x]) [y]) 2093 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 2094 // result: (FlagLT_ULT) 2095 for { 2096 y := v.AuxInt 2097 v_0 := v.Args[0] 2098 if v_0.Op != OpAMD64MOVLconst { 2099 break 2100 } 2101 x := v_0.AuxInt 2102 
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 2103 break 2104 } 2105 v.reset(OpAMD64FlagLT_ULT) 2106 return true 2107 } 2108 // match: (CMPLconst (MOVLconst [x]) [y]) 2109 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 2110 // result: (FlagLT_UGT) 2111 for { 2112 y := v.AuxInt 2113 v_0 := v.Args[0] 2114 if v_0.Op != OpAMD64MOVLconst { 2115 break 2116 } 2117 x := v_0.AuxInt 2118 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 2119 break 2120 } 2121 v.reset(OpAMD64FlagLT_UGT) 2122 return true 2123 } 2124 // match: (CMPLconst (MOVLconst [x]) [y]) 2125 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 2126 // result: (FlagGT_ULT) 2127 for { 2128 y := v.AuxInt 2129 v_0 := v.Args[0] 2130 if v_0.Op != OpAMD64MOVLconst { 2131 break 2132 } 2133 x := v_0.AuxInt 2134 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 2135 break 2136 } 2137 v.reset(OpAMD64FlagGT_ULT) 2138 return true 2139 } 2140 // match: (CMPLconst (MOVLconst [x]) [y]) 2141 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 2142 // result: (FlagGT_UGT) 2143 for { 2144 y := v.AuxInt 2145 v_0 := v.Args[0] 2146 if v_0.Op != OpAMD64MOVLconst { 2147 break 2148 } 2149 x := v_0.AuxInt 2150 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 2151 break 2152 } 2153 v.reset(OpAMD64FlagGT_UGT) 2154 return true 2155 } 2156 // match: (CMPLconst (SHRLconst _ [c]) [n]) 2157 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 2158 // result: (FlagLT_ULT) 2159 for { 2160 n := v.AuxInt 2161 v_0 := v.Args[0] 2162 if v_0.Op != OpAMD64SHRLconst { 2163 break 2164 } 2165 c := v_0.AuxInt 2166 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 2167 break 2168 } 2169 v.reset(OpAMD64FlagLT_ULT) 2170 return true 2171 } 2172 // match: (CMPLconst (ANDLconst _ [m]) [n]) 2173 // cond: 0 <= int32(m) && int32(m) < int32(n) 2174 // result: (FlagLT_ULT) 2175 for { 2176 n := v.AuxInt 2177 v_0 := v.Args[0] 2178 if v_0.Op != OpAMD64ANDLconst { 2179 break 2180 } 2181 m := v_0.AuxInt 2182 if !(0 <= int32(m) && int32(m) < int32(n)) { 2183 break 2184 } 2185 v.reset(OpAMD64FlagLT_ULT) 2186 return true 2187 } 2188 // match: (CMPLconst (ANDL x y) [0]) 2189 // cond: 2190 // result: (TESTL x y) 2191 for { 2192 if v.AuxInt != 0 { 2193 break 2194 } 2195 v_0 := v.Args[0] 2196 if v_0.Op != OpAMD64ANDL { 2197 break 2198 } 2199 x := v_0.Args[0] 2200 y := v_0.Args[1] 2201 v.reset(OpAMD64TESTL) 2202 v.AddArg(x) 2203 v.AddArg(y) 2204 return true 2205 } 2206 // match: (CMPLconst (ANDLconst [c] x) [0]) 2207 // cond: 2208 // result: (TESTLconst [c] x) 2209 for { 2210 if v.AuxInt != 0 { 2211 break 2212 } 2213 v_0 := v.Args[0] 2214 if v_0.Op != OpAMD64ANDLconst { 2215 break 2216 } 2217 c := v_0.AuxInt 2218 x := v_0.Args[0] 2219 v.reset(OpAMD64TESTLconst) 2220 v.AuxInt = c 2221 v.AddArg(x) 2222 return true 2223 } 2224 // match: (CMPLconst x [0]) 2225 // cond: 2226 // result: (TESTL x x) 2227 for { 2228 if v.AuxInt != 0 { 2229 break 2230 } 2231 x := v.Args[0] 2232 v.reset(OpAMD64TESTL) 2233 v.AddArg(x) 2234 v.AddArg(x) 2235 return true 2236 } 2237 return false 2238 } 2239 func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool { 2240 b := v.Block 2241 _ = b 2242 // match: (CMPQ x (MOVQconst [c])) 2243 // cond: is32Bit(c) 2244 // result: (CMPQconst x [c]) 2245 for { 2246 x := v.Args[0] 2247 v_1 := v.Args[1] 2248 if v_1.Op != OpAMD64MOVQconst { 2249 break 2250 } 2251 c := v_1.AuxInt 2252 if !(is32Bit(c)) { 2253 break 2254 } 2255 v.reset(OpAMD64CMPQconst) 2256 v.AuxInt = c 2257 v.AddArg(x) 2258 return true 2259 } 2260 // match: (CMPQ 
(MOVQconst [c]) x) 2261 // cond: is32Bit(c) 2262 // result: (InvertFlags (CMPQconst x [c])) 2263 for { 2264 v_0 := v.Args[0] 2265 if v_0.Op != OpAMD64MOVQconst { 2266 break 2267 } 2268 c := v_0.AuxInt 2269 x := v.Args[1] 2270 if !(is32Bit(c)) { 2271 break 2272 } 2273 v.reset(OpAMD64InvertFlags) 2274 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 2275 v0.AuxInt = c 2276 v0.AddArg(x) 2277 v.AddArg(v0) 2278 return true 2279 } 2280 return false 2281 } 2282 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { 2283 b := v.Block 2284 _ = b 2285 // match: (CMPQconst (MOVQconst [x]) [y]) 2286 // cond: x==y 2287 // result: (FlagEQ) 2288 for { 2289 y := v.AuxInt 2290 v_0 := v.Args[0] 2291 if v_0.Op != OpAMD64MOVQconst { 2292 break 2293 } 2294 x := v_0.AuxInt 2295 if !(x == y) { 2296 break 2297 } 2298 v.reset(OpAMD64FlagEQ) 2299 return true 2300 } 2301 // match: (CMPQconst (MOVQconst [x]) [y]) 2302 // cond: x<y && uint64(x)<uint64(y) 2303 // result: (FlagLT_ULT) 2304 for { 2305 y := v.AuxInt 2306 v_0 := v.Args[0] 2307 if v_0.Op != OpAMD64MOVQconst { 2308 break 2309 } 2310 x := v_0.AuxInt 2311 if !(x < y && uint64(x) < uint64(y)) { 2312 break 2313 } 2314 v.reset(OpAMD64FlagLT_ULT) 2315 return true 2316 } 2317 // match: (CMPQconst (MOVQconst [x]) [y]) 2318 // cond: x<y && uint64(x)>uint64(y) 2319 // result: (FlagLT_UGT) 2320 for { 2321 y := v.AuxInt 2322 v_0 := v.Args[0] 2323 if v_0.Op != OpAMD64MOVQconst { 2324 break 2325 } 2326 x := v_0.AuxInt 2327 if !(x < y && uint64(x) > uint64(y)) { 2328 break 2329 } 2330 v.reset(OpAMD64FlagLT_UGT) 2331 return true 2332 } 2333 // match: (CMPQconst (MOVQconst [x]) [y]) 2334 // cond: x>y && uint64(x)<uint64(y) 2335 // result: (FlagGT_ULT) 2336 for { 2337 y := v.AuxInt 2338 v_0 := v.Args[0] 2339 if v_0.Op != OpAMD64MOVQconst { 2340 break 2341 } 2342 x := v_0.AuxInt 2343 if !(x > y && uint64(x) < uint64(y)) { 2344 break 2345 } 2346 v.reset(OpAMD64FlagGT_ULT) 2347 return true 2348 } 2349 // match: (CMPQconst (MOVQconst [x]) [y]) 2350 // cond: x>y && uint64(x)>uint64(y) 2351 // result: (FlagGT_UGT) 2352 for { 2353 y := v.AuxInt 2354 v_0 := v.Args[0] 2355 if v_0.Op != OpAMD64MOVQconst { 2356 break 2357 } 2358 x := v_0.AuxInt 2359 if !(x > y && uint64(x) > uint64(y)) { 2360 break 2361 } 2362 v.reset(OpAMD64FlagGT_UGT) 2363 return true 2364 } 2365 // match: (CMPQconst (MOVBQZX _) [c]) 2366 // cond: 0xFF < c 2367 // result: (FlagLT_ULT) 2368 for { 2369 c := v.AuxInt 2370 v_0 := v.Args[0] 2371 if v_0.Op != OpAMD64MOVBQZX { 2372 break 2373 } 2374 if !(0xFF < c) { 2375 break 2376 } 2377 v.reset(OpAMD64FlagLT_ULT) 2378 return true 2379 } 2380 // match: (CMPQconst (MOVWQZX _) [c]) 2381 // cond: 0xFFFF < c 2382 // result: (FlagLT_ULT) 2383 for { 2384 c := v.AuxInt 2385 v_0 := v.Args[0] 2386 if v_0.Op != OpAMD64MOVWQZX { 2387 break 2388 } 2389 if !(0xFFFF < c) { 2390 break 2391 } 2392 v.reset(OpAMD64FlagLT_ULT) 2393 return true 2394 } 2395 // match: (CMPQconst (MOVLQZX _) [c]) 2396 // cond: 0xFFFFFFFF < c 2397 // result: (FlagLT_ULT) 2398 for { 2399 c := v.AuxInt 2400 v_0 := v.Args[0] 2401 if v_0.Op != OpAMD64MOVLQZX { 2402 break 2403 } 2404 if !(0xFFFFFFFF < c) { 2405 break 2406 } 2407 v.reset(OpAMD64FlagLT_ULT) 2408 return true 2409 } 2410 // match: (CMPQconst (SHRQconst _ [c]) [n]) 2411 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 2412 // result: (FlagLT_ULT) 2413 for { 2414 n := v.AuxInt 2415 v_0 := v.Args[0] 2416 if v_0.Op != OpAMD64SHRQconst { 2417 break 2418 } 2419 c := v_0.AuxInt 2420 if !(0 <= n && 0 < c && c <= 64 && 
(1<<uint64(64-c)) <= uint64(n)) { 2421 break 2422 } 2423 v.reset(OpAMD64FlagLT_ULT) 2424 return true 2425 } 2426 // match: (CMPQconst (ANDQconst _ [m]) [n]) 2427 // cond: 0 <= m && m < n 2428 // result: (FlagLT_ULT) 2429 for { 2430 n := v.AuxInt 2431 v_0 := v.Args[0] 2432 if v_0.Op != OpAMD64ANDQconst { 2433 break 2434 } 2435 m := v_0.AuxInt 2436 if !(0 <= m && m < n) { 2437 break 2438 } 2439 v.reset(OpAMD64FlagLT_ULT) 2440 return true 2441 } 2442 // match: (CMPQconst (ANDQ x y) [0]) 2443 // cond: 2444 // result: (TESTQ x y) 2445 for { 2446 if v.AuxInt != 0 { 2447 break 2448 } 2449 v_0 := v.Args[0] 2450 if v_0.Op != OpAMD64ANDQ { 2451 break 2452 } 2453 x := v_0.Args[0] 2454 y := v_0.Args[1] 2455 v.reset(OpAMD64TESTQ) 2456 v.AddArg(x) 2457 v.AddArg(y) 2458 return true 2459 } 2460 // match: (CMPQconst (ANDQconst [c] x) [0]) 2461 // cond: 2462 // result: (TESTQconst [c] x) 2463 for { 2464 if v.AuxInt != 0 { 2465 break 2466 } 2467 v_0 := v.Args[0] 2468 if v_0.Op != OpAMD64ANDQconst { 2469 break 2470 } 2471 c := v_0.AuxInt 2472 x := v_0.Args[0] 2473 v.reset(OpAMD64TESTQconst) 2474 v.AuxInt = c 2475 v.AddArg(x) 2476 return true 2477 } 2478 // match: (CMPQconst x [0]) 2479 // cond: 2480 // result: (TESTQ x x) 2481 for { 2482 if v.AuxInt != 0 { 2483 break 2484 } 2485 x := v.Args[0] 2486 v.reset(OpAMD64TESTQ) 2487 v.AddArg(x) 2488 v.AddArg(x) 2489 return true 2490 } 2491 return false 2492 } 2493 func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { 2494 b := v.Block 2495 _ = b 2496 // match: (CMPW x (MOVLconst [c])) 2497 // cond: 2498 // result: (CMPWconst x [int64(int16(c))]) 2499 for { 2500 x := v.Args[0] 2501 v_1 := v.Args[1] 2502 if v_1.Op != OpAMD64MOVLconst { 2503 break 2504 } 2505 c := v_1.AuxInt 2506 v.reset(OpAMD64CMPWconst) 2507 v.AuxInt = int64(int16(c)) 2508 v.AddArg(x) 2509 return true 2510 } 2511 // match: (CMPW (MOVLconst [c]) x) 2512 // cond: 2513 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 2514 for { 2515 v_0 := v.Args[0] 2516 if v_0.Op != OpAMD64MOVLconst { 2517 break 2518 } 2519 c := v_0.AuxInt 2520 x := v.Args[1] 2521 v.reset(OpAMD64InvertFlags) 2522 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 2523 v0.AuxInt = int64(int16(c)) 2524 v0.AddArg(x) 2525 v.AddArg(v0) 2526 return true 2527 } 2528 return false 2529 } 2530 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { 2531 b := v.Block 2532 _ = b 2533 // match: (CMPWconst (MOVLconst [x]) [y]) 2534 // cond: int16(x)==int16(y) 2535 // result: (FlagEQ) 2536 for { 2537 y := v.AuxInt 2538 v_0 := v.Args[0] 2539 if v_0.Op != OpAMD64MOVLconst { 2540 break 2541 } 2542 x := v_0.AuxInt 2543 if !(int16(x) == int16(y)) { 2544 break 2545 } 2546 v.reset(OpAMD64FlagEQ) 2547 return true 2548 } 2549 // match: (CMPWconst (MOVLconst [x]) [y]) 2550 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 2551 // result: (FlagLT_ULT) 2552 for { 2553 y := v.AuxInt 2554 v_0 := v.Args[0] 2555 if v_0.Op != OpAMD64MOVLconst { 2556 break 2557 } 2558 x := v_0.AuxInt 2559 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 2560 break 2561 } 2562 v.reset(OpAMD64FlagLT_ULT) 2563 return true 2564 } 2565 // match: (CMPWconst (MOVLconst [x]) [y]) 2566 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 2567 // result: (FlagLT_UGT) 2568 for { 2569 y := v.AuxInt 2570 v_0 := v.Args[0] 2571 if v_0.Op != OpAMD64MOVLconst { 2572 break 2573 } 2574 x := v_0.AuxInt 2575 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 2576 break 2577 } 2578 v.reset(OpAMD64FlagLT_UGT) 2579 return true 2580 } 2581 // match: (CMPWconst 
(MOVLconst [x]) [y]) 2582 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 2583 // result: (FlagGT_ULT) 2584 for { 2585 y := v.AuxInt 2586 v_0 := v.Args[0] 2587 if v_0.Op != OpAMD64MOVLconst { 2588 break 2589 } 2590 x := v_0.AuxInt 2591 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 2592 break 2593 } 2594 v.reset(OpAMD64FlagGT_ULT) 2595 return true 2596 } 2597 // match: (CMPWconst (MOVLconst [x]) [y]) 2598 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 2599 // result: (FlagGT_UGT) 2600 for { 2601 y := v.AuxInt 2602 v_0 := v.Args[0] 2603 if v_0.Op != OpAMD64MOVLconst { 2604 break 2605 } 2606 x := v_0.AuxInt 2607 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 2608 break 2609 } 2610 v.reset(OpAMD64FlagGT_UGT) 2611 return true 2612 } 2613 // match: (CMPWconst (ANDLconst _ [m]) [n]) 2614 // cond: 0 <= int16(m) && int16(m) < int16(n) 2615 // result: (FlagLT_ULT) 2616 for { 2617 n := v.AuxInt 2618 v_0 := v.Args[0] 2619 if v_0.Op != OpAMD64ANDLconst { 2620 break 2621 } 2622 m := v_0.AuxInt 2623 if !(0 <= int16(m) && int16(m) < int16(n)) { 2624 break 2625 } 2626 v.reset(OpAMD64FlagLT_ULT) 2627 return true 2628 } 2629 // match: (CMPWconst (ANDL x y) [0]) 2630 // cond: 2631 // result: (TESTW x y) 2632 for { 2633 if v.AuxInt != 0 { 2634 break 2635 } 2636 v_0 := v.Args[0] 2637 if v_0.Op != OpAMD64ANDL { 2638 break 2639 } 2640 x := v_0.Args[0] 2641 y := v_0.Args[1] 2642 v.reset(OpAMD64TESTW) 2643 v.AddArg(x) 2644 v.AddArg(y) 2645 return true 2646 } 2647 // match: (CMPWconst (ANDLconst [c] x) [0]) 2648 // cond: 2649 // result: (TESTWconst [int64(int16(c))] x) 2650 for { 2651 if v.AuxInt != 0 { 2652 break 2653 } 2654 v_0 := v.Args[0] 2655 if v_0.Op != OpAMD64ANDLconst { 2656 break 2657 } 2658 c := v_0.AuxInt 2659 x := v_0.Args[0] 2660 v.reset(OpAMD64TESTWconst) 2661 v.AuxInt = int64(int16(c)) 2662 v.AddArg(x) 2663 return true 2664 } 2665 // match: (CMPWconst x [0]) 2666 // cond: 2667 // result: (TESTW x x) 2668 for { 2669 if v.AuxInt != 0 { 2670 break 2671 } 2672 x := v.Args[0] 2673 v.reset(OpAMD64TESTW) 2674 v.AddArg(x) 2675 v.AddArg(x) 2676 return true 2677 } 2678 return false 2679 } 2680 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool { 2681 b := v.Block 2682 _ = b 2683 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2684 // cond: is32Bit(off1+off2) 2685 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 2686 for { 2687 off1 := v.AuxInt 2688 sym := v.Aux 2689 v_0 := v.Args[0] 2690 if v_0.Op != OpAMD64ADDQconst { 2691 break 2692 } 2693 off2 := v_0.AuxInt 2694 ptr := v_0.Args[0] 2695 old := v.Args[1] 2696 new_ := v.Args[2] 2697 mem := v.Args[3] 2698 if !(is32Bit(off1 + off2)) { 2699 break 2700 } 2701 v.reset(OpAMD64CMPXCHGLlock) 2702 v.AuxInt = off1 + off2 2703 v.Aux = sym 2704 v.AddArg(ptr) 2705 v.AddArg(old) 2706 v.AddArg(new_) 2707 v.AddArg(mem) 2708 return true 2709 } 2710 return false 2711 } 2712 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool { 2713 b := v.Block 2714 _ = b 2715 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2716 // cond: is32Bit(off1+off2) 2717 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 2718 for { 2719 off1 := v.AuxInt 2720 sym := v.Aux 2721 v_0 := v.Args[0] 2722 if v_0.Op != OpAMD64ADDQconst { 2723 break 2724 } 2725 off2 := v_0.AuxInt 2726 ptr := v_0.Args[0] 2727 old := v.Args[1] 2728 new_ := v.Args[2] 2729 mem := v.Args[3] 2730 if !(is32Bit(off1 + off2)) { 2731 break 2732 } 2733 v.reset(OpAMD64CMPXCHGQlock) 2734 v.AuxInt = off1 + 
off2 2735 v.Aux = sym 2736 v.AddArg(ptr) 2737 v.AddArg(old) 2738 v.AddArg(new_) 2739 v.AddArg(mem) 2740 return true 2741 } 2742 return false 2743 } 2744 func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool { 2745 b := v.Block 2746 _ = b 2747 // match: (LEAL [c] {s} (ADDLconst [d] x)) 2748 // cond: is32Bit(c+d) 2749 // result: (LEAL [c+d] {s} x) 2750 for { 2751 c := v.AuxInt 2752 s := v.Aux 2753 v_0 := v.Args[0] 2754 if v_0.Op != OpAMD64ADDLconst { 2755 break 2756 } 2757 d := v_0.AuxInt 2758 x := v_0.Args[0] 2759 if !(is32Bit(c + d)) { 2760 break 2761 } 2762 v.reset(OpAMD64LEAL) 2763 v.AuxInt = c + d 2764 v.Aux = s 2765 v.AddArg(x) 2766 return true 2767 } 2768 return false 2769 } 2770 func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool { 2771 b := v.Block 2772 _ = b 2773 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 2774 // cond: is32Bit(c+d) 2775 // result: (LEAQ [c+d] {s} x) 2776 for { 2777 c := v.AuxInt 2778 s := v.Aux 2779 v_0 := v.Args[0] 2780 if v_0.Op != OpAMD64ADDQconst { 2781 break 2782 } 2783 d := v_0.AuxInt 2784 x := v_0.Args[0] 2785 if !(is32Bit(c + d)) { 2786 break 2787 } 2788 v.reset(OpAMD64LEAQ) 2789 v.AuxInt = c + d 2790 v.Aux = s 2791 v.AddArg(x) 2792 return true 2793 } 2794 // match: (LEAQ [c] {s} (ADDQ x y)) 2795 // cond: x.Op != OpSB && y.Op != OpSB 2796 // result: (LEAQ1 [c] {s} x y) 2797 for { 2798 c := v.AuxInt 2799 s := v.Aux 2800 v_0 := v.Args[0] 2801 if v_0.Op != OpAMD64ADDQ { 2802 break 2803 } 2804 x := v_0.Args[0] 2805 y := v_0.Args[1] 2806 if !(x.Op != OpSB && y.Op != OpSB) { 2807 break 2808 } 2809 v.reset(OpAMD64LEAQ1) 2810 v.AuxInt = c 2811 v.Aux = s 2812 v.AddArg(x) 2813 v.AddArg(y) 2814 return true 2815 } 2816 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 2817 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2818 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 2819 for { 2820 off1 := v.AuxInt 2821 sym1 := v.Aux 2822 v_0 := v.Args[0] 2823 if v_0.Op != OpAMD64LEAQ { 2824 break 2825 } 2826 off2 := v_0.AuxInt 2827 sym2 := v_0.Aux 2828 x := v_0.Args[0] 2829 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2830 break 2831 } 2832 v.reset(OpAMD64LEAQ) 2833 v.AuxInt = off1 + off2 2834 v.Aux = mergeSym(sym1, sym2) 2835 v.AddArg(x) 2836 return true 2837 } 2838 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 2839 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2840 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2841 for { 2842 off1 := v.AuxInt 2843 sym1 := v.Aux 2844 v_0 := v.Args[0] 2845 if v_0.Op != OpAMD64LEAQ1 { 2846 break 2847 } 2848 off2 := v_0.AuxInt 2849 sym2 := v_0.Aux 2850 x := v_0.Args[0] 2851 y := v_0.Args[1] 2852 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2853 break 2854 } 2855 v.reset(OpAMD64LEAQ1) 2856 v.AuxInt = off1 + off2 2857 v.Aux = mergeSym(sym1, sym2) 2858 v.AddArg(x) 2859 v.AddArg(y) 2860 return true 2861 } 2862 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 2863 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2864 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 2865 for { 2866 off1 := v.AuxInt 2867 sym1 := v.Aux 2868 v_0 := v.Args[0] 2869 if v_0.Op != OpAMD64LEAQ2 { 2870 break 2871 } 2872 off2 := v_0.AuxInt 2873 sym2 := v_0.Aux 2874 x := v_0.Args[0] 2875 y := v_0.Args[1] 2876 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2877 break 2878 } 2879 v.reset(OpAMD64LEAQ2) 2880 v.AuxInt = off1 + off2 2881 v.Aux = mergeSym(sym1, sym2) 2882 v.AddArg(x) 2883 v.AddArg(y) 2884 return true 2885 } 2886 // match: (LEAQ [off1] {sym1} (LEAQ4 
[off2] {sym2} x y)) 2887 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2888 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 2889 for { 2890 off1 := v.AuxInt 2891 sym1 := v.Aux 2892 v_0 := v.Args[0] 2893 if v_0.Op != OpAMD64LEAQ4 { 2894 break 2895 } 2896 off2 := v_0.AuxInt 2897 sym2 := v_0.Aux 2898 x := v_0.Args[0] 2899 y := v_0.Args[1] 2900 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2901 break 2902 } 2903 v.reset(OpAMD64LEAQ4) 2904 v.AuxInt = off1 + off2 2905 v.Aux = mergeSym(sym1, sym2) 2906 v.AddArg(x) 2907 v.AddArg(y) 2908 return true 2909 } 2910 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 2911 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2912 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 2913 for { 2914 off1 := v.AuxInt 2915 sym1 := v.Aux 2916 v_0 := v.Args[0] 2917 if v_0.Op != OpAMD64LEAQ8 { 2918 break 2919 } 2920 off2 := v_0.AuxInt 2921 sym2 := v_0.Aux 2922 x := v_0.Args[0] 2923 y := v_0.Args[1] 2924 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2925 break 2926 } 2927 v.reset(OpAMD64LEAQ8) 2928 v.AuxInt = off1 + off2 2929 v.Aux = mergeSym(sym1, sym2) 2930 v.AddArg(x) 2931 v.AddArg(y) 2932 return true 2933 } 2934 return false 2935 } 2936 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool { 2937 b := v.Block 2938 _ = b 2939 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 2940 // cond: is32Bit(c+d) && x.Op != OpSB 2941 // result: (LEAQ1 [c+d] {s} x y) 2942 for { 2943 c := v.AuxInt 2944 s := v.Aux 2945 v_0 := v.Args[0] 2946 if v_0.Op != OpAMD64ADDQconst { 2947 break 2948 } 2949 d := v_0.AuxInt 2950 x := v_0.Args[0] 2951 y := v.Args[1] 2952 if !(is32Bit(c+d) && x.Op != OpSB) { 2953 break 2954 } 2955 v.reset(OpAMD64LEAQ1) 2956 v.AuxInt = c + d 2957 v.Aux = s 2958 v.AddArg(x) 2959 v.AddArg(y) 2960 return true 2961 } 2962 // match: (LEAQ1 [c] {s} x (ADDQconst [d] y)) 2963 // cond: is32Bit(c+d) && y.Op != OpSB 2964 // result: (LEAQ1 [c+d] {s} x y) 2965 for { 2966 c := v.AuxInt 2967 s := v.Aux 2968 x := v.Args[0] 2969 v_1 := v.Args[1] 2970 if v_1.Op != OpAMD64ADDQconst { 2971 break 2972 } 2973 d := v_1.AuxInt 2974 y := v_1.Args[0] 2975 if !(is32Bit(c+d) && y.Op != OpSB) { 2976 break 2977 } 2978 v.reset(OpAMD64LEAQ1) 2979 v.AuxInt = c + d 2980 v.Aux = s 2981 v.AddArg(x) 2982 v.AddArg(y) 2983 return true 2984 } 2985 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 2986 // cond: 2987 // result: (LEAQ2 [c] {s} x y) 2988 for { 2989 c := v.AuxInt 2990 s := v.Aux 2991 x := v.Args[0] 2992 v_1 := v.Args[1] 2993 if v_1.Op != OpAMD64SHLQconst { 2994 break 2995 } 2996 if v_1.AuxInt != 1 { 2997 break 2998 } 2999 y := v_1.Args[0] 3000 v.reset(OpAMD64LEAQ2) 3001 v.AuxInt = c 3002 v.Aux = s 3003 v.AddArg(x) 3004 v.AddArg(y) 3005 return true 3006 } 3007 // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) 3008 // cond: 3009 // result: (LEAQ2 [c] {s} y x) 3010 for { 3011 c := v.AuxInt 3012 s := v.Aux 3013 v_0 := v.Args[0] 3014 if v_0.Op != OpAMD64SHLQconst { 3015 break 3016 } 3017 if v_0.AuxInt != 1 { 3018 break 3019 } 3020 x := v_0.Args[0] 3021 y := v.Args[1] 3022 v.reset(OpAMD64LEAQ2) 3023 v.AuxInt = c 3024 v.Aux = s 3025 v.AddArg(y) 3026 v.AddArg(x) 3027 return true 3028 } 3029 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 3030 // cond: 3031 // result: (LEAQ4 [c] {s} x y) 3032 for { 3033 c := v.AuxInt 3034 s := v.Aux 3035 x := v.Args[0] 3036 v_1 := v.Args[1] 3037 if v_1.Op != OpAMD64SHLQconst { 3038 break 3039 } 3040 if v_1.AuxInt != 2 { 3041 break 3042 } 3043 y := v_1.Args[0] 3044 v.reset(OpAMD64LEAQ4) 3045 v.AuxInt = c 3046 v.Aux = 
s 3047 v.AddArg(x) 3048 v.AddArg(y) 3049 return true 3050 } 3051 // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) 3052 // cond: 3053 // result: (LEAQ4 [c] {s} y x) 3054 for { 3055 c := v.AuxInt 3056 s := v.Aux 3057 v_0 := v.Args[0] 3058 if v_0.Op != OpAMD64SHLQconst { 3059 break 3060 } 3061 if v_0.AuxInt != 2 { 3062 break 3063 } 3064 x := v_0.Args[0] 3065 y := v.Args[1] 3066 v.reset(OpAMD64LEAQ4) 3067 v.AuxInt = c 3068 v.Aux = s 3069 v.AddArg(y) 3070 v.AddArg(x) 3071 return true 3072 } 3073 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 3074 // cond: 3075 // result: (LEAQ8 [c] {s} x y) 3076 for { 3077 c := v.AuxInt 3078 s := v.Aux 3079 x := v.Args[0] 3080 v_1 := v.Args[1] 3081 if v_1.Op != OpAMD64SHLQconst { 3082 break 3083 } 3084 if v_1.AuxInt != 3 { 3085 break 3086 } 3087 y := v_1.Args[0] 3088 v.reset(OpAMD64LEAQ8) 3089 v.AuxInt = c 3090 v.Aux = s 3091 v.AddArg(x) 3092 v.AddArg(y) 3093 return true 3094 } 3095 // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y) 3096 // cond: 3097 // result: (LEAQ8 [c] {s} y x) 3098 for { 3099 c := v.AuxInt 3100 s := v.Aux 3101 v_0 := v.Args[0] 3102 if v_0.Op != OpAMD64SHLQconst { 3103 break 3104 } 3105 if v_0.AuxInt != 3 { 3106 break 3107 } 3108 x := v_0.Args[0] 3109 y := v.Args[1] 3110 v.reset(OpAMD64LEAQ8) 3111 v.AuxInt = c 3112 v.Aux = s 3113 v.AddArg(y) 3114 v.AddArg(x) 3115 return true 3116 } 3117 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3118 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3119 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3120 for { 3121 off1 := v.AuxInt 3122 sym1 := v.Aux 3123 v_0 := v.Args[0] 3124 if v_0.Op != OpAMD64LEAQ { 3125 break 3126 } 3127 off2 := v_0.AuxInt 3128 sym2 := v_0.Aux 3129 x := v_0.Args[0] 3130 y := v.Args[1] 3131 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3132 break 3133 } 3134 v.reset(OpAMD64LEAQ1) 3135 v.AuxInt = off1 + off2 3136 v.Aux = mergeSym(sym1, sym2) 3137 v.AddArg(x) 3138 v.AddArg(y) 3139 return true 3140 } 3141 // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) 3142 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB 3143 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3144 for { 3145 off1 := v.AuxInt 3146 sym1 := v.Aux 3147 x := v.Args[0] 3148 v_1 := v.Args[1] 3149 if v_1.Op != OpAMD64LEAQ { 3150 break 3151 } 3152 off2 := v_1.AuxInt 3153 sym2 := v_1.Aux 3154 y := v_1.Args[0] 3155 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { 3156 break 3157 } 3158 v.reset(OpAMD64LEAQ1) 3159 v.AuxInt = off1 + off2 3160 v.Aux = mergeSym(sym1, sym2) 3161 v.AddArg(x) 3162 v.AddArg(y) 3163 return true 3164 } 3165 return false 3166 } 3167 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool { 3168 b := v.Block 3169 _ = b 3170 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 3171 // cond: is32Bit(c+d) && x.Op != OpSB 3172 // result: (LEAQ2 [c+d] {s} x y) 3173 for { 3174 c := v.AuxInt 3175 s := v.Aux 3176 v_0 := v.Args[0] 3177 if v_0.Op != OpAMD64ADDQconst { 3178 break 3179 } 3180 d := v_0.AuxInt 3181 x := v_0.Args[0] 3182 y := v.Args[1] 3183 if !(is32Bit(c+d) && x.Op != OpSB) { 3184 break 3185 } 3186 v.reset(OpAMD64LEAQ2) 3187 v.AuxInt = c + d 3188 v.Aux = s 3189 v.AddArg(x) 3190 v.AddArg(y) 3191 return true 3192 } 3193 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 3194 // cond: is32Bit(c+2*d) && y.Op != OpSB 3195 // result: (LEAQ2 [c+2*d] {s} x y) 3196 for { 3197 c := v.AuxInt 3198 s := v.Aux 3199 x := v.Args[0] 3200 v_1 := v.Args[1] 3201 if v_1.Op != OpAMD64ADDQconst { 3202 break 3203 } 
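// Note: in LEAQ2 the second operand is the index, scaled by 2, so a constant
// d pulled out of that operand contributes 2*d to the displacement. Roughly,
// x + 2*(y+d) + c = x + 2*y + (c + 2*d), which is why this rewrite sets
// AuxInt = c + 2*d below, whereas the preceding rule, which folds a constant
// out of the unscaled base operand, only adds d.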
3204 d := v_1.AuxInt 3205 y := v_1.Args[0] 3206 if !(is32Bit(c+2*d) && y.Op != OpSB) { 3207 break 3208 } 3209 v.reset(OpAMD64LEAQ2) 3210 v.AuxInt = c + 2*d 3211 v.Aux = s 3212 v.AddArg(x) 3213 v.AddArg(y) 3214 return true 3215 } 3216 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 3217 // cond: 3218 // result: (LEAQ4 [c] {s} x y) 3219 for { 3220 c := v.AuxInt 3221 s := v.Aux 3222 x := v.Args[0] 3223 v_1 := v.Args[1] 3224 if v_1.Op != OpAMD64SHLQconst { 3225 break 3226 } 3227 if v_1.AuxInt != 1 { 3228 break 3229 } 3230 y := v_1.Args[0] 3231 v.reset(OpAMD64LEAQ4) 3232 v.AuxInt = c 3233 v.Aux = s 3234 v.AddArg(x) 3235 v.AddArg(y) 3236 return true 3237 } 3238 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 3239 // cond: 3240 // result: (LEAQ8 [c] {s} x y) 3241 for { 3242 c := v.AuxInt 3243 s := v.Aux 3244 x := v.Args[0] 3245 v_1 := v.Args[1] 3246 if v_1.Op != OpAMD64SHLQconst { 3247 break 3248 } 3249 if v_1.AuxInt != 2 { 3250 break 3251 } 3252 y := v_1.Args[0] 3253 v.reset(OpAMD64LEAQ8) 3254 v.AuxInt = c 3255 v.Aux = s 3256 v.AddArg(x) 3257 v.AddArg(y) 3258 return true 3259 } 3260 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3261 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3262 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3263 for { 3264 off1 := v.AuxInt 3265 sym1 := v.Aux 3266 v_0 := v.Args[0] 3267 if v_0.Op != OpAMD64LEAQ { 3268 break 3269 } 3270 off2 := v_0.AuxInt 3271 sym2 := v_0.Aux 3272 x := v_0.Args[0] 3273 y := v.Args[1] 3274 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3275 break 3276 } 3277 v.reset(OpAMD64LEAQ2) 3278 v.AuxInt = off1 + off2 3279 v.Aux = mergeSym(sym1, sym2) 3280 v.AddArg(x) 3281 v.AddArg(y) 3282 return true 3283 } 3284 return false 3285 } 3286 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool { 3287 b := v.Block 3288 _ = b 3289 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 3290 // cond: is32Bit(c+d) && x.Op != OpSB 3291 // result: (LEAQ4 [c+d] {s} x y) 3292 for { 3293 c := v.AuxInt 3294 s := v.Aux 3295 v_0 := v.Args[0] 3296 if v_0.Op != OpAMD64ADDQconst { 3297 break 3298 } 3299 d := v_0.AuxInt 3300 x := v_0.Args[0] 3301 y := v.Args[1] 3302 if !(is32Bit(c+d) && x.Op != OpSB) { 3303 break 3304 } 3305 v.reset(OpAMD64LEAQ4) 3306 v.AuxInt = c + d 3307 v.Aux = s 3308 v.AddArg(x) 3309 v.AddArg(y) 3310 return true 3311 } 3312 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 3313 // cond: is32Bit(c+4*d) && y.Op != OpSB 3314 // result: (LEAQ4 [c+4*d] {s} x y) 3315 for { 3316 c := v.AuxInt 3317 s := v.Aux 3318 x := v.Args[0] 3319 v_1 := v.Args[1] 3320 if v_1.Op != OpAMD64ADDQconst { 3321 break 3322 } 3323 d := v_1.AuxInt 3324 y := v_1.Args[0] 3325 if !(is32Bit(c+4*d) && y.Op != OpSB) { 3326 break 3327 } 3328 v.reset(OpAMD64LEAQ4) 3329 v.AuxInt = c + 4*d 3330 v.Aux = s 3331 v.AddArg(x) 3332 v.AddArg(y) 3333 return true 3334 } 3335 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 3336 // cond: 3337 // result: (LEAQ8 [c] {s} x y) 3338 for { 3339 c := v.AuxInt 3340 s := v.Aux 3341 x := v.Args[0] 3342 v_1 := v.Args[1] 3343 if v_1.Op != OpAMD64SHLQconst { 3344 break 3345 } 3346 if v_1.AuxInt != 1 { 3347 break 3348 } 3349 y := v_1.Args[0] 3350 v.reset(OpAMD64LEAQ8) 3351 v.AuxInt = c 3352 v.Aux = s 3353 v.AddArg(x) 3354 v.AddArg(y) 3355 return true 3356 } 3357 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3358 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3359 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3360 for { 3361 off1 := v.AuxInt 3362 sym1 := v.Aux 
3363 v_0 := v.Args[0] 3364 if v_0.Op != OpAMD64LEAQ { 3365 break 3366 } 3367 off2 := v_0.AuxInt 3368 sym2 := v_0.Aux 3369 x := v_0.Args[0] 3370 y := v.Args[1] 3371 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3372 break 3373 } 3374 v.reset(OpAMD64LEAQ4) 3375 v.AuxInt = off1 + off2 3376 v.Aux = mergeSym(sym1, sym2) 3377 v.AddArg(x) 3378 v.AddArg(y) 3379 return true 3380 } 3381 return false 3382 } 3383 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool { 3384 b := v.Block 3385 _ = b 3386 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 3387 // cond: is32Bit(c+d) && x.Op != OpSB 3388 // result: (LEAQ8 [c+d] {s} x y) 3389 for { 3390 c := v.AuxInt 3391 s := v.Aux 3392 v_0 := v.Args[0] 3393 if v_0.Op != OpAMD64ADDQconst { 3394 break 3395 } 3396 d := v_0.AuxInt 3397 x := v_0.Args[0] 3398 y := v.Args[1] 3399 if !(is32Bit(c+d) && x.Op != OpSB) { 3400 break 3401 } 3402 v.reset(OpAMD64LEAQ8) 3403 v.AuxInt = c + d 3404 v.Aux = s 3405 v.AddArg(x) 3406 v.AddArg(y) 3407 return true 3408 } 3409 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 3410 // cond: is32Bit(c+8*d) && y.Op != OpSB 3411 // result: (LEAQ8 [c+8*d] {s} x y) 3412 for { 3413 c := v.AuxInt 3414 s := v.Aux 3415 x := v.Args[0] 3416 v_1 := v.Args[1] 3417 if v_1.Op != OpAMD64ADDQconst { 3418 break 3419 } 3420 d := v_1.AuxInt 3421 y := v_1.Args[0] 3422 if !(is32Bit(c+8*d) && y.Op != OpSB) { 3423 break 3424 } 3425 v.reset(OpAMD64LEAQ8) 3426 v.AuxInt = c + 8*d 3427 v.Aux = s 3428 v.AddArg(x) 3429 v.AddArg(y) 3430 return true 3431 } 3432 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3433 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3434 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 3435 for { 3436 off1 := v.AuxInt 3437 sym1 := v.Aux 3438 v_0 := v.Args[0] 3439 if v_0.Op != OpAMD64LEAQ { 3440 break 3441 } 3442 off2 := v_0.AuxInt 3443 sym2 := v_0.Aux 3444 x := v_0.Args[0] 3445 y := v.Args[1] 3446 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3447 break 3448 } 3449 v.reset(OpAMD64LEAQ8) 3450 v.AuxInt = off1 + off2 3451 v.Aux = mergeSym(sym1, sym2) 3452 v.AddArg(x) 3453 v.AddArg(y) 3454 return true 3455 } 3456 return false 3457 } 3458 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { 3459 b := v.Block 3460 _ = b 3461 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 3462 // cond: x.Uses == 1 && clobber(x) 3463 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3464 for { 3465 x := v.Args[0] 3466 if x.Op != OpAMD64MOVBload { 3467 break 3468 } 3469 off := x.AuxInt 3470 sym := x.Aux 3471 ptr := x.Args[0] 3472 mem := x.Args[1] 3473 if !(x.Uses == 1 && clobber(x)) { 3474 break 3475 } 3476 b = x.Block 3477 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 3478 v.reset(OpCopy) 3479 v.AddArg(v0) 3480 v0.AuxInt = off 3481 v0.Aux = sym 3482 v0.AddArg(ptr) 3483 v0.AddArg(mem) 3484 return true 3485 } 3486 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 3487 // cond: x.Uses == 1 && clobber(x) 3488 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3489 for { 3490 x := v.Args[0] 3491 if x.Op != OpAMD64MOVWload { 3492 break 3493 } 3494 off := x.AuxInt 3495 sym := x.Aux 3496 ptr := x.Args[0] 3497 mem := x.Args[1] 3498 if !(x.Uses == 1 && clobber(x)) { 3499 break 3500 } 3501 b = x.Block 3502 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 3503 v.reset(OpCopy) 3504 v.AddArg(v0) 3505 v0.AuxInt = off 3506 v0.Aux = sym 3507 v0.AddArg(ptr) 3508 v0.AddArg(mem) 3509 return true 3510 } 3511 // match: (MOVBQSX 
x:(MOVLload [off] {sym} ptr mem)) 3512 // cond: x.Uses == 1 && clobber(x) 3513 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3514 for { 3515 x := v.Args[0] 3516 if x.Op != OpAMD64MOVLload { 3517 break 3518 } 3519 off := x.AuxInt 3520 sym := x.Aux 3521 ptr := x.Args[0] 3522 mem := x.Args[1] 3523 if !(x.Uses == 1 && clobber(x)) { 3524 break 3525 } 3526 b = x.Block 3527 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 3528 v.reset(OpCopy) 3529 v.AddArg(v0) 3530 v0.AuxInt = off 3531 v0.Aux = sym 3532 v0.AddArg(ptr) 3533 v0.AddArg(mem) 3534 return true 3535 } 3536 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 3537 // cond: x.Uses == 1 && clobber(x) 3538 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3539 for { 3540 x := v.Args[0] 3541 if x.Op != OpAMD64MOVQload { 3542 break 3543 } 3544 off := x.AuxInt 3545 sym := x.Aux 3546 ptr := x.Args[0] 3547 mem := x.Args[1] 3548 if !(x.Uses == 1 && clobber(x)) { 3549 break 3550 } 3551 b = x.Block 3552 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 3553 v.reset(OpCopy) 3554 v.AddArg(v0) 3555 v0.AuxInt = off 3556 v0.Aux = sym 3557 v0.AddArg(ptr) 3558 v0.AddArg(mem) 3559 return true 3560 } 3561 // match: (MOVBQSX (ANDLconst [c] x)) 3562 // cond: c & 0x80 == 0 3563 // result: (ANDLconst [c & 0x7f] x) 3564 for { 3565 v_0 := v.Args[0] 3566 if v_0.Op != OpAMD64ANDLconst { 3567 break 3568 } 3569 c := v_0.AuxInt 3570 x := v_0.Args[0] 3571 if !(c&0x80 == 0) { 3572 break 3573 } 3574 v.reset(OpAMD64ANDLconst) 3575 v.AuxInt = c & 0x7f 3576 v.AddArg(x) 3577 return true 3578 } 3579 return false 3580 } 3581 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool { 3582 b := v.Block 3583 _ = b 3584 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3585 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3586 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3587 for { 3588 off1 := v.AuxInt 3589 sym1 := v.Aux 3590 v_0 := v.Args[0] 3591 if v_0.Op != OpAMD64LEAQ { 3592 break 3593 } 3594 off2 := v_0.AuxInt 3595 sym2 := v_0.Aux 3596 base := v_0.Args[0] 3597 mem := v.Args[1] 3598 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3599 break 3600 } 3601 v.reset(OpAMD64MOVBQSXload) 3602 v.AuxInt = off1 + off2 3603 v.Aux = mergeSym(sym1, sym2) 3604 v.AddArg(base) 3605 v.AddArg(mem) 3606 return true 3607 } 3608 return false 3609 } 3610 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { 3611 b := v.Block 3612 _ = b 3613 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 3614 // cond: x.Uses == 1 && clobber(x) 3615 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3616 for { 3617 x := v.Args[0] 3618 if x.Op != OpAMD64MOVBload { 3619 break 3620 } 3621 off := x.AuxInt 3622 sym := x.Aux 3623 ptr := x.Args[0] 3624 mem := x.Args[1] 3625 if !(x.Uses == 1 && clobber(x)) { 3626 break 3627 } 3628 b = x.Block 3629 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 3630 v.reset(OpCopy) 3631 v.AddArg(v0) 3632 v0.AuxInt = off 3633 v0.Aux = sym 3634 v0.AddArg(ptr) 3635 v0.AddArg(mem) 3636 return true 3637 } 3638 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 3639 // cond: x.Uses == 1 && clobber(x) 3640 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3641 for { 3642 x := v.Args[0] 3643 if x.Op != OpAMD64MOVWload { 3644 break 3645 } 3646 off := x.AuxInt 3647 sym := x.Aux 3648 ptr := x.Args[0] 3649 mem := x.Args[1] 3650 if !(x.Uses == 1 && clobber(x)) { 3651 break 3652 } 3653 b = x.Block 3654 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 3655 
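// The @x.Block directive in this rule materializes the replacement load v0 in
// the block of the original load (b was reassigned to x.Block above), and v is
// then reduced to a plain copy of it. The wider MOVWload is narrowed to a
// MOVBload since only the low byte is kept, and dropping the explicit MOVBQZX
// is expected to be safe because byte loads are lowered to zero-extending
// moves on amd64, so the upper bits are already zero.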
v.reset(OpCopy) 3656 v.AddArg(v0) 3657 v0.AuxInt = off 3658 v0.Aux = sym 3659 v0.AddArg(ptr) 3660 v0.AddArg(mem) 3661 return true 3662 } 3663 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 3664 // cond: x.Uses == 1 && clobber(x) 3665 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3666 for { 3667 x := v.Args[0] 3668 if x.Op != OpAMD64MOVLload { 3669 break 3670 } 3671 off := x.AuxInt 3672 sym := x.Aux 3673 ptr := x.Args[0] 3674 mem := x.Args[1] 3675 if !(x.Uses == 1 && clobber(x)) { 3676 break 3677 } 3678 b = x.Block 3679 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 3680 v.reset(OpCopy) 3681 v.AddArg(v0) 3682 v0.AuxInt = off 3683 v0.Aux = sym 3684 v0.AddArg(ptr) 3685 v0.AddArg(mem) 3686 return true 3687 } 3688 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 3689 // cond: x.Uses == 1 && clobber(x) 3690 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3691 for { 3692 x := v.Args[0] 3693 if x.Op != OpAMD64MOVQload { 3694 break 3695 } 3696 off := x.AuxInt 3697 sym := x.Aux 3698 ptr := x.Args[0] 3699 mem := x.Args[1] 3700 if !(x.Uses == 1 && clobber(x)) { 3701 break 3702 } 3703 b = x.Block 3704 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 3705 v.reset(OpCopy) 3706 v.AddArg(v0) 3707 v0.AuxInt = off 3708 v0.Aux = sym 3709 v0.AddArg(ptr) 3710 v0.AddArg(mem) 3711 return true 3712 } 3713 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 3714 // cond: x.Uses == 1 && clobber(x) 3715 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 3716 for { 3717 x := v.Args[0] 3718 if x.Op != OpAMD64MOVBloadidx1 { 3719 break 3720 } 3721 off := x.AuxInt 3722 sym := x.Aux 3723 ptr := x.Args[0] 3724 idx := x.Args[1] 3725 mem := x.Args[2] 3726 if !(x.Uses == 1 && clobber(x)) { 3727 break 3728 } 3729 b = x.Block 3730 v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type) 3731 v.reset(OpCopy) 3732 v.AddArg(v0) 3733 v0.AuxInt = off 3734 v0.Aux = sym 3735 v0.AddArg(ptr) 3736 v0.AddArg(idx) 3737 v0.AddArg(mem) 3738 return true 3739 } 3740 // match: (MOVBQZX (ANDLconst [c] x)) 3741 // cond: 3742 // result: (ANDLconst [c & 0xff] x) 3743 for { 3744 v_0 := v.Args[0] 3745 if v_0.Op != OpAMD64ANDLconst { 3746 break 3747 } 3748 c := v_0.AuxInt 3749 x := v_0.Args[0] 3750 v.reset(OpAMD64ANDLconst) 3751 v.AuxInt = c & 0xff 3752 v.AddArg(x) 3753 return true 3754 } 3755 return false 3756 } 3757 func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool { 3758 b := v.Block 3759 _ = b 3760 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 3761 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 3762 // result: x 3763 for { 3764 off := v.AuxInt 3765 sym := v.Aux 3766 ptr := v.Args[0] 3767 v_1 := v.Args[1] 3768 if v_1.Op != OpAMD64MOVBstore { 3769 break 3770 } 3771 off2 := v_1.AuxInt 3772 sym2 := v_1.Aux 3773 ptr2 := v_1.Args[0] 3774 x := v_1.Args[1] 3775 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 3776 break 3777 } 3778 v.reset(OpCopy) 3779 v.Type = x.Type 3780 v.AddArg(x) 3781 return true 3782 } 3783 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 3784 // cond: is32Bit(off1+off2) 3785 // result: (MOVBload [off1+off2] {sym} ptr mem) 3786 for { 3787 off1 := v.AuxInt 3788 sym := v.Aux 3789 v_0 := v.Args[0] 3790 if v_0.Op != OpAMD64ADDQconst { 3791 break 3792 } 3793 off2 := v_0.AuxInt 3794 ptr := v_0.Args[0] 3795 mem := v.Args[1] 3796 if !(is32Bit(off1 + off2)) { 3797 break 3798 } 3799 v.reset(OpAMD64MOVBload) 3800 v.AuxInt = off1 + off2 3801 v.Aux = sym 3802 v.AddArg(ptr) 3803 v.AddArg(mem) 3804 return 
true 3805 } 3806 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3807 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3808 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3809 for { 3810 off1 := v.AuxInt 3811 sym1 := v.Aux 3812 v_0 := v.Args[0] 3813 if v_0.Op != OpAMD64LEAQ { 3814 break 3815 } 3816 off2 := v_0.AuxInt 3817 sym2 := v_0.Aux 3818 base := v_0.Args[0] 3819 mem := v.Args[1] 3820 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3821 break 3822 } 3823 v.reset(OpAMD64MOVBload) 3824 v.AuxInt = off1 + off2 3825 v.Aux = mergeSym(sym1, sym2) 3826 v.AddArg(base) 3827 v.AddArg(mem) 3828 return true 3829 } 3830 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 3831 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3832 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 3833 for { 3834 off1 := v.AuxInt 3835 sym1 := v.Aux 3836 v_0 := v.Args[0] 3837 if v_0.Op != OpAMD64LEAQ1 { 3838 break 3839 } 3840 off2 := v_0.AuxInt 3841 sym2 := v_0.Aux 3842 ptr := v_0.Args[0] 3843 idx := v_0.Args[1] 3844 mem := v.Args[1] 3845 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3846 break 3847 } 3848 v.reset(OpAMD64MOVBloadidx1) 3849 v.AuxInt = off1 + off2 3850 v.Aux = mergeSym(sym1, sym2) 3851 v.AddArg(ptr) 3852 v.AddArg(idx) 3853 v.AddArg(mem) 3854 return true 3855 } 3856 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 3857 // cond: ptr.Op != OpSB 3858 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 3859 for { 3860 off := v.AuxInt 3861 sym := v.Aux 3862 v_0 := v.Args[0] 3863 if v_0.Op != OpAMD64ADDQ { 3864 break 3865 } 3866 ptr := v_0.Args[0] 3867 idx := v_0.Args[1] 3868 mem := v.Args[1] 3869 if !(ptr.Op != OpSB) { 3870 break 3871 } 3872 v.reset(OpAMD64MOVBloadidx1) 3873 v.AuxInt = off 3874 v.Aux = sym 3875 v.AddArg(ptr) 3876 v.AddArg(idx) 3877 v.AddArg(mem) 3878 return true 3879 } 3880 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 3881 // cond: canMergeSym(sym1, sym2) 3882 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3883 for { 3884 off1 := v.AuxInt 3885 sym1 := v.Aux 3886 v_0 := v.Args[0] 3887 if v_0.Op != OpAMD64LEAL { 3888 break 3889 } 3890 off2 := v_0.AuxInt 3891 sym2 := v_0.Aux 3892 base := v_0.Args[0] 3893 mem := v.Args[1] 3894 if !(canMergeSym(sym1, sym2)) { 3895 break 3896 } 3897 v.reset(OpAMD64MOVBload) 3898 v.AuxInt = off1 + off2 3899 v.Aux = mergeSym(sym1, sym2) 3900 v.AddArg(base) 3901 v.AddArg(mem) 3902 return true 3903 } 3904 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 3905 // cond: is32Bit(off1+off2) 3906 // result: (MOVBload [off1+off2] {sym} ptr mem) 3907 for { 3908 off1 := v.AuxInt 3909 sym := v.Aux 3910 v_0 := v.Args[0] 3911 if v_0.Op != OpAMD64ADDLconst { 3912 break 3913 } 3914 off2 := v_0.AuxInt 3915 ptr := v_0.Args[0] 3916 mem := v.Args[1] 3917 if !(is32Bit(off1 + off2)) { 3918 break 3919 } 3920 v.reset(OpAMD64MOVBload) 3921 v.AuxInt = off1 + off2 3922 v.Aux = sym 3923 v.AddArg(ptr) 3924 v.AddArg(mem) 3925 return true 3926 } 3927 return false 3928 } 3929 func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { 3930 b := v.Block 3931 _ = b 3932 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 3933 // cond: 3934 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3935 for { 3936 c := v.AuxInt 3937 sym := v.Aux 3938 v_0 := v.Args[0] 3939 if v_0.Op != OpAMD64ADDQconst { 3940 break 3941 } 3942 d := v_0.AuxInt 3943 ptr := v_0.Args[0] 3944 idx := v.Args[1] 3945 mem := v.Args[2] 3946 
v.reset(OpAMD64MOVBloadidx1) 3947 v.AuxInt = c + d 3948 v.Aux = sym 3949 v.AddArg(ptr) 3950 v.AddArg(idx) 3951 v.AddArg(mem) 3952 return true 3953 } 3954 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 3955 // cond: 3956 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3957 for { 3958 c := v.AuxInt 3959 sym := v.Aux 3960 ptr := v.Args[0] 3961 v_1 := v.Args[1] 3962 if v_1.Op != OpAMD64ADDQconst { 3963 break 3964 } 3965 d := v_1.AuxInt 3966 idx := v_1.Args[0] 3967 mem := v.Args[2] 3968 v.reset(OpAMD64MOVBloadidx1) 3969 v.AuxInt = c + d 3970 v.Aux = sym 3971 v.AddArg(ptr) 3972 v.AddArg(idx) 3973 v.AddArg(mem) 3974 return true 3975 } 3976 return false 3977 } 3978 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { 3979 b := v.Block 3980 _ = b 3981 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 3982 // cond: 3983 // result: (MOVBstore [off] {sym} ptr x mem) 3984 for { 3985 off := v.AuxInt 3986 sym := v.Aux 3987 ptr := v.Args[0] 3988 v_1 := v.Args[1] 3989 if v_1.Op != OpAMD64MOVBQSX { 3990 break 3991 } 3992 x := v_1.Args[0] 3993 mem := v.Args[2] 3994 v.reset(OpAMD64MOVBstore) 3995 v.AuxInt = off 3996 v.Aux = sym 3997 v.AddArg(ptr) 3998 v.AddArg(x) 3999 v.AddArg(mem) 4000 return true 4001 } 4002 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 4003 // cond: 4004 // result: (MOVBstore [off] {sym} ptr x mem) 4005 for { 4006 off := v.AuxInt 4007 sym := v.Aux 4008 ptr := v.Args[0] 4009 v_1 := v.Args[1] 4010 if v_1.Op != OpAMD64MOVBQZX { 4011 break 4012 } 4013 x := v_1.Args[0] 4014 mem := v.Args[2] 4015 v.reset(OpAMD64MOVBstore) 4016 v.AuxInt = off 4017 v.Aux = sym 4018 v.AddArg(ptr) 4019 v.AddArg(x) 4020 v.AddArg(mem) 4021 return true 4022 } 4023 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 4024 // cond: is32Bit(off1+off2) 4025 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 4026 for { 4027 off1 := v.AuxInt 4028 sym := v.Aux 4029 v_0 := v.Args[0] 4030 if v_0.Op != OpAMD64ADDQconst { 4031 break 4032 } 4033 off2 := v_0.AuxInt 4034 ptr := v_0.Args[0] 4035 val := v.Args[1] 4036 mem := v.Args[2] 4037 if !(is32Bit(off1 + off2)) { 4038 break 4039 } 4040 v.reset(OpAMD64MOVBstore) 4041 v.AuxInt = off1 + off2 4042 v.Aux = sym 4043 v.AddArg(ptr) 4044 v.AddArg(val) 4045 v.AddArg(mem) 4046 return true 4047 } 4048 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 4049 // cond: validOff(off) 4050 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 4051 for { 4052 off := v.AuxInt 4053 sym := v.Aux 4054 ptr := v.Args[0] 4055 v_1 := v.Args[1] 4056 if v_1.Op != OpAMD64MOVLconst { 4057 break 4058 } 4059 c := v_1.AuxInt 4060 mem := v.Args[2] 4061 if !(validOff(off)) { 4062 break 4063 } 4064 v.reset(OpAMD64MOVBstoreconst) 4065 v.AuxInt = makeValAndOff(int64(int8(c)), off) 4066 v.Aux = sym 4067 v.AddArg(ptr) 4068 v.AddArg(mem) 4069 return true 4070 } 4071 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 4072 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4073 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4074 for { 4075 off1 := v.AuxInt 4076 sym1 := v.Aux 4077 v_0 := v.Args[0] 4078 if v_0.Op != OpAMD64LEAQ { 4079 break 4080 } 4081 off2 := v_0.AuxInt 4082 sym2 := v_0.Aux 4083 base := v_0.Args[0] 4084 val := v.Args[1] 4085 mem := v.Args[2] 4086 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4087 break 4088 } 4089 v.reset(OpAMD64MOVBstore) 4090 v.AuxInt = off1 + off2 4091 v.Aux = mergeSym(sym1, sym2) 4092 v.AddArg(base) 4093 v.AddArg(val) 4094 v.AddArg(mem) 
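// At this point the LEAQ's displacement and symbol have been folded into the
// store: the offsets are summed, and mergeSym keeps whichever symbol is
// non-nil, which canMergeSym above effectively restricts to at most one of
// the two.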
4095 return true 4096 } 4097 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 4098 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4099 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 4100 for { 4101 off1 := v.AuxInt 4102 sym1 := v.Aux 4103 v_0 := v.Args[0] 4104 if v_0.Op != OpAMD64LEAQ1 { 4105 break 4106 } 4107 off2 := v_0.AuxInt 4108 sym2 := v_0.Aux 4109 ptr := v_0.Args[0] 4110 idx := v_0.Args[1] 4111 val := v.Args[1] 4112 mem := v.Args[2] 4113 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4114 break 4115 } 4116 v.reset(OpAMD64MOVBstoreidx1) 4117 v.AuxInt = off1 + off2 4118 v.Aux = mergeSym(sym1, sym2) 4119 v.AddArg(ptr) 4120 v.AddArg(idx) 4121 v.AddArg(val) 4122 v.AddArg(mem) 4123 return true 4124 } 4125 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 4126 // cond: ptr.Op != OpSB 4127 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 4128 for { 4129 off := v.AuxInt 4130 sym := v.Aux 4131 v_0 := v.Args[0] 4132 if v_0.Op != OpAMD64ADDQ { 4133 break 4134 } 4135 ptr := v_0.Args[0] 4136 idx := v_0.Args[1] 4137 val := v.Args[1] 4138 mem := v.Args[2] 4139 if !(ptr.Op != OpSB) { 4140 break 4141 } 4142 v.reset(OpAMD64MOVBstoreidx1) 4143 v.AuxInt = off 4144 v.Aux = sym 4145 v.AddArg(ptr) 4146 v.AddArg(idx) 4147 v.AddArg(val) 4148 v.AddArg(mem) 4149 return true 4150 } 4151 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 4152 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 4153 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 4154 for { 4155 i := v.AuxInt 4156 s := v.Aux 4157 p := v.Args[0] 4158 w := v.Args[1] 4159 x2 := v.Args[2] 4160 if x2.Op != OpAMD64MOVBstore { 4161 break 4162 } 4163 if x2.AuxInt != i-1 { 4164 break 4165 } 4166 if x2.Aux != s { 4167 break 4168 } 4169 if p != x2.Args[0] { 4170 break 4171 } 4172 x2_1 := x2.Args[1] 4173 if x2_1.Op != OpAMD64SHRLconst { 4174 break 4175 } 4176 if x2_1.AuxInt != 8 { 4177 break 4178 } 4179 if w != x2_1.Args[0] { 4180 break 4181 } 4182 x1 := x2.Args[2] 4183 if x1.Op != OpAMD64MOVBstore { 4184 break 4185 } 4186 if x1.AuxInt != i-2 { 4187 break 4188 } 4189 if x1.Aux != s { 4190 break 4191 } 4192 if p != x1.Args[0] { 4193 break 4194 } 4195 x1_1 := x1.Args[1] 4196 if x1_1.Op != OpAMD64SHRLconst { 4197 break 4198 } 4199 if x1_1.AuxInt != 16 { 4200 break 4201 } 4202 if w != x1_1.Args[0] { 4203 break 4204 } 4205 x0 := x1.Args[2] 4206 if x0.Op != OpAMD64MOVBstore { 4207 break 4208 } 4209 if x0.AuxInt != i-3 { 4210 break 4211 } 4212 if x0.Aux != s { 4213 break 4214 } 4215 if p != x0.Args[0] { 4216 break 4217 } 4218 x0_1 := x0.Args[1] 4219 if x0_1.Op != OpAMD64SHRLconst { 4220 break 4221 } 4222 if x0_1.AuxInt != 24 { 4223 break 4224 } 4225 if w != x0_1.Args[0] { 4226 break 4227 } 4228 mem := x0.Args[2] 4229 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 4230 break 4231 } 4232 v.reset(OpAMD64MOVLstore) 4233 v.AuxInt = i - 3 4234 v.Aux = s 4235 v.AddArg(p) 4236 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 4237 v0.AddArg(w) 4238 v.AddArg(v0) 4239 v.AddArg(mem) 4240 return true 4241 } 4242 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) 
x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 4243 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 4244 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 4245 for { 4246 i := v.AuxInt 4247 s := v.Aux 4248 p := v.Args[0] 4249 w := v.Args[1] 4250 x6 := v.Args[2] 4251 if x6.Op != OpAMD64MOVBstore { 4252 break 4253 } 4254 if x6.AuxInt != i-1 { 4255 break 4256 } 4257 if x6.Aux != s { 4258 break 4259 } 4260 if p != x6.Args[0] { 4261 break 4262 } 4263 x6_1 := x6.Args[1] 4264 if x6_1.Op != OpAMD64SHRQconst { 4265 break 4266 } 4267 if x6_1.AuxInt != 8 { 4268 break 4269 } 4270 if w != x6_1.Args[0] { 4271 break 4272 } 4273 x5 := x6.Args[2] 4274 if x5.Op != OpAMD64MOVBstore { 4275 break 4276 } 4277 if x5.AuxInt != i-2 { 4278 break 4279 } 4280 if x5.Aux != s { 4281 break 4282 } 4283 if p != x5.Args[0] { 4284 break 4285 } 4286 x5_1 := x5.Args[1] 4287 if x5_1.Op != OpAMD64SHRQconst { 4288 break 4289 } 4290 if x5_1.AuxInt != 16 { 4291 break 4292 } 4293 if w != x5_1.Args[0] { 4294 break 4295 } 4296 x4 := x5.Args[2] 4297 if x4.Op != OpAMD64MOVBstore { 4298 break 4299 } 4300 if x4.AuxInt != i-3 { 4301 break 4302 } 4303 if x4.Aux != s { 4304 break 4305 } 4306 if p != x4.Args[0] { 4307 break 4308 } 4309 x4_1 := x4.Args[1] 4310 if x4_1.Op != OpAMD64SHRQconst { 4311 break 4312 } 4313 if x4_1.AuxInt != 24 { 4314 break 4315 } 4316 if w != x4_1.Args[0] { 4317 break 4318 } 4319 x3 := x4.Args[2] 4320 if x3.Op != OpAMD64MOVBstore { 4321 break 4322 } 4323 if x3.AuxInt != i-4 { 4324 break 4325 } 4326 if x3.Aux != s { 4327 break 4328 } 4329 if p != x3.Args[0] { 4330 break 4331 } 4332 x3_1 := x3.Args[1] 4333 if x3_1.Op != OpAMD64SHRQconst { 4334 break 4335 } 4336 if x3_1.AuxInt != 32 { 4337 break 4338 } 4339 if w != x3_1.Args[0] { 4340 break 4341 } 4342 x2 := x3.Args[2] 4343 if x2.Op != OpAMD64MOVBstore { 4344 break 4345 } 4346 if x2.AuxInt != i-5 { 4347 break 4348 } 4349 if x2.Aux != s { 4350 break 4351 } 4352 if p != x2.Args[0] { 4353 break 4354 } 4355 x2_1 := x2.Args[1] 4356 if x2_1.Op != OpAMD64SHRQconst { 4357 break 4358 } 4359 if x2_1.AuxInt != 40 { 4360 break 4361 } 4362 if w != x2_1.Args[0] { 4363 break 4364 } 4365 x1 := x2.Args[2] 4366 if x1.Op != OpAMD64MOVBstore { 4367 break 4368 } 4369 if x1.AuxInt != i-6 { 4370 break 4371 } 4372 if x1.Aux != s { 4373 break 4374 } 4375 if p != x1.Args[0] { 4376 break 4377 } 4378 x1_1 := x1.Args[1] 4379 if x1_1.Op != OpAMD64SHRQconst { 4380 break 4381 } 4382 if x1_1.AuxInt != 48 { 4383 break 4384 } 4385 if w != x1_1.Args[0] { 4386 break 4387 } 4388 x0 := x1.Args[2] 4389 if x0.Op != OpAMD64MOVBstore { 4390 break 4391 } 4392 if x0.AuxInt != i-7 { 4393 break 4394 } 4395 if x0.Aux != s { 4396 break 4397 } 4398 if p != x0.Args[0] { 4399 break 4400 } 4401 x0_1 := x0.Args[1] 4402 if x0_1.Op != OpAMD64SHRQconst { 4403 break 4404 } 4405 if x0_1.AuxInt != 56 { 4406 break 4407 } 4408 if w != x0_1.Args[0] { 4409 break 4410 } 4411 mem := x0.Args[2] 4412 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 4413 break 4414 } 4415 v.reset(OpAMD64MOVQstore) 4416 v.AuxInt = i - 7 4417 v.Aux = s 4418 v.AddArg(p) 4419 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 4420 v0.AddArg(w) 4421 
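// v0 is the byte-swapped value BSWAPQ(w). The eight matched single-byte
// stores wrote w>>56 at the lowest address (i-7) up through w itself at the
// highest (i), i.e. w laid out in big-endian order, so one 8-byte store of
// the swapped value at i-7 reproduces the same memory image on little-endian
// amd64.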
v.AddArg(v0) 4422 v.AddArg(mem) 4423 return true 4424 } 4425 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 4426 // cond: x.Uses == 1 && clobber(x) 4427 // result: (MOVWstore [i-1] {s} p w mem) 4428 for { 4429 i := v.AuxInt 4430 s := v.Aux 4431 p := v.Args[0] 4432 v_1 := v.Args[1] 4433 if v_1.Op != OpAMD64SHRQconst { 4434 break 4435 } 4436 if v_1.AuxInt != 8 { 4437 break 4438 } 4439 w := v_1.Args[0] 4440 x := v.Args[2] 4441 if x.Op != OpAMD64MOVBstore { 4442 break 4443 } 4444 if x.AuxInt != i-1 { 4445 break 4446 } 4447 if x.Aux != s { 4448 break 4449 } 4450 if p != x.Args[0] { 4451 break 4452 } 4453 if w != x.Args[1] { 4454 break 4455 } 4456 mem := x.Args[2] 4457 if !(x.Uses == 1 && clobber(x)) { 4458 break 4459 } 4460 v.reset(OpAMD64MOVWstore) 4461 v.AuxInt = i - 1 4462 v.Aux = s 4463 v.AddArg(p) 4464 v.AddArg(w) 4465 v.AddArg(mem) 4466 return true 4467 } 4468 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 4469 // cond: x.Uses == 1 && clobber(x) 4470 // result: (MOVWstore [i-1] {s} p w0 mem) 4471 for { 4472 i := v.AuxInt 4473 s := v.Aux 4474 p := v.Args[0] 4475 v_1 := v.Args[1] 4476 if v_1.Op != OpAMD64SHRQconst { 4477 break 4478 } 4479 j := v_1.AuxInt 4480 w := v_1.Args[0] 4481 x := v.Args[2] 4482 if x.Op != OpAMD64MOVBstore { 4483 break 4484 } 4485 if x.AuxInt != i-1 { 4486 break 4487 } 4488 if x.Aux != s { 4489 break 4490 } 4491 if p != x.Args[0] { 4492 break 4493 } 4494 w0 := x.Args[1] 4495 if w0.Op != OpAMD64SHRQconst { 4496 break 4497 } 4498 if w0.AuxInt != j-8 { 4499 break 4500 } 4501 if w != w0.Args[0] { 4502 break 4503 } 4504 mem := x.Args[2] 4505 if !(x.Uses == 1 && clobber(x)) { 4506 break 4507 } 4508 v.reset(OpAMD64MOVWstore) 4509 v.AuxInt = i - 1 4510 v.Aux = s 4511 v.AddArg(p) 4512 v.AddArg(w0) 4513 v.AddArg(mem) 4514 return true 4515 } 4516 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 4517 // cond: canMergeSym(sym1, sym2) 4518 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4519 for { 4520 off1 := v.AuxInt 4521 sym1 := v.Aux 4522 v_0 := v.Args[0] 4523 if v_0.Op != OpAMD64LEAL { 4524 break 4525 } 4526 off2 := v_0.AuxInt 4527 sym2 := v_0.Aux 4528 base := v_0.Args[0] 4529 val := v.Args[1] 4530 mem := v.Args[2] 4531 if !(canMergeSym(sym1, sym2)) { 4532 break 4533 } 4534 v.reset(OpAMD64MOVBstore) 4535 v.AuxInt = off1 + off2 4536 v.Aux = mergeSym(sym1, sym2) 4537 v.AddArg(base) 4538 v.AddArg(val) 4539 v.AddArg(mem) 4540 return true 4541 } 4542 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 4543 // cond: is32Bit(off1+off2) 4544 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 4545 for { 4546 off1 := v.AuxInt 4547 sym := v.Aux 4548 v_0 := v.Args[0] 4549 if v_0.Op != OpAMD64ADDLconst { 4550 break 4551 } 4552 off2 := v_0.AuxInt 4553 ptr := v_0.Args[0] 4554 val := v.Args[1] 4555 mem := v.Args[2] 4556 if !(is32Bit(off1 + off2)) { 4557 break 4558 } 4559 v.reset(OpAMD64MOVBstore) 4560 v.AuxInt = off1 + off2 4561 v.Aux = sym 4562 v.AddArg(ptr) 4563 v.AddArg(val) 4564 v.AddArg(mem) 4565 return true 4566 } 4567 return false 4568 } 4569 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { 4570 b := v.Block 4571 _ = b 4572 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 4573 // cond: ValAndOff(sc).canAdd(off) 4574 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4575 for { 4576 sc := v.AuxInt 4577 s := v.Aux 4578 v_0 := v.Args[0] 4579 if v_0.Op != OpAMD64ADDQconst { 4580 break 
4581 } 4582 off := v_0.AuxInt 4583 ptr := v_0.Args[0] 4584 mem := v.Args[1] 4585 if !(ValAndOff(sc).canAdd(off)) { 4586 break 4587 } 4588 v.reset(OpAMD64MOVBstoreconst) 4589 v.AuxInt = ValAndOff(sc).add(off) 4590 v.Aux = s 4591 v.AddArg(ptr) 4592 v.AddArg(mem) 4593 return true 4594 } 4595 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 4596 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4597 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4598 for { 4599 sc := v.AuxInt 4600 sym1 := v.Aux 4601 v_0 := v.Args[0] 4602 if v_0.Op != OpAMD64LEAQ { 4603 break 4604 } 4605 off := v_0.AuxInt 4606 sym2 := v_0.Aux 4607 ptr := v_0.Args[0] 4608 mem := v.Args[1] 4609 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4610 break 4611 } 4612 v.reset(OpAMD64MOVBstoreconst) 4613 v.AuxInt = ValAndOff(sc).add(off) 4614 v.Aux = mergeSym(sym1, sym2) 4615 v.AddArg(ptr) 4616 v.AddArg(mem) 4617 return true 4618 } 4619 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 4620 // cond: canMergeSym(sym1, sym2) 4621 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 4622 for { 4623 x := v.AuxInt 4624 sym1 := v.Aux 4625 v_0 := v.Args[0] 4626 if v_0.Op != OpAMD64LEAQ1 { 4627 break 4628 } 4629 off := v_0.AuxInt 4630 sym2 := v_0.Aux 4631 ptr := v_0.Args[0] 4632 idx := v_0.Args[1] 4633 mem := v.Args[1] 4634 if !(canMergeSym(sym1, sym2)) { 4635 break 4636 } 4637 v.reset(OpAMD64MOVBstoreconstidx1) 4638 v.AuxInt = ValAndOff(x).add(off) 4639 v.Aux = mergeSym(sym1, sym2) 4640 v.AddArg(ptr) 4641 v.AddArg(idx) 4642 v.AddArg(mem) 4643 return true 4644 } 4645 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 4646 // cond: 4647 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 4648 for { 4649 x := v.AuxInt 4650 sym := v.Aux 4651 v_0 := v.Args[0] 4652 if v_0.Op != OpAMD64ADDQ { 4653 break 4654 } 4655 ptr := v_0.Args[0] 4656 idx := v_0.Args[1] 4657 mem := v.Args[1] 4658 v.reset(OpAMD64MOVBstoreconstidx1) 4659 v.AuxInt = x 4660 v.Aux = sym 4661 v.AddArg(ptr) 4662 v.AddArg(idx) 4663 v.AddArg(mem) 4664 return true 4665 } 4666 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 4667 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4668 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 4669 for { 4670 c := v.AuxInt 4671 s := v.Aux 4672 p := v.Args[0] 4673 x := v.Args[1] 4674 if x.Op != OpAMD64MOVBstoreconst { 4675 break 4676 } 4677 a := x.AuxInt 4678 if x.Aux != s { 4679 break 4680 } 4681 if p != x.Args[0] { 4682 break 4683 } 4684 mem := x.Args[1] 4685 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4686 break 4687 } 4688 v.reset(OpAMD64MOVWstoreconst) 4689 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4690 v.Aux = s 4691 v.AddArg(p) 4692 v.AddArg(mem) 4693 return true 4694 } 4695 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 4696 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4697 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4698 for { 4699 sc := v.AuxInt 4700 sym1 := v.Aux 4701 v_0 := v.Args[0] 4702 if v_0.Op != OpAMD64LEAL { 4703 break 4704 } 4705 off := v_0.AuxInt 4706 sym2 := v_0.Aux 4707 ptr := v_0.Args[0] 4708 mem := v.Args[1] 4709 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4710 break 4711 } 4712 
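// For the *storeconst ops, AuxInt is a ValAndOff: the constant value to store
// and the store offset packed into a single int64. canAdd/add above adjust
// only the offset half and leave the value untouched, while makeValAndOff
// (used elsewhere in this file) builds a fresh pair from a value and an
// offset.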
v.reset(OpAMD64MOVBstoreconst) 4713 v.AuxInt = ValAndOff(sc).add(off) 4714 v.Aux = mergeSym(sym1, sym2) 4715 v.AddArg(ptr) 4716 v.AddArg(mem) 4717 return true 4718 } 4719 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 4720 // cond: ValAndOff(sc).canAdd(off) 4721 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4722 for { 4723 sc := v.AuxInt 4724 s := v.Aux 4725 v_0 := v.Args[0] 4726 if v_0.Op != OpAMD64ADDLconst { 4727 break 4728 } 4729 off := v_0.AuxInt 4730 ptr := v_0.Args[0] 4731 mem := v.Args[1] 4732 if !(ValAndOff(sc).canAdd(off)) { 4733 break 4734 } 4735 v.reset(OpAMD64MOVBstoreconst) 4736 v.AuxInt = ValAndOff(sc).add(off) 4737 v.Aux = s 4738 v.AddArg(ptr) 4739 v.AddArg(mem) 4740 return true 4741 } 4742 return false 4743 } 4744 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { 4745 b := v.Block 4746 _ = b 4747 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 4748 // cond: 4749 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4750 for { 4751 x := v.AuxInt 4752 sym := v.Aux 4753 v_0 := v.Args[0] 4754 if v_0.Op != OpAMD64ADDQconst { 4755 break 4756 } 4757 c := v_0.AuxInt 4758 ptr := v_0.Args[0] 4759 idx := v.Args[1] 4760 mem := v.Args[2] 4761 v.reset(OpAMD64MOVBstoreconstidx1) 4762 v.AuxInt = ValAndOff(x).add(c) 4763 v.Aux = sym 4764 v.AddArg(ptr) 4765 v.AddArg(idx) 4766 v.AddArg(mem) 4767 return true 4768 } 4769 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 4770 // cond: 4771 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4772 for { 4773 x := v.AuxInt 4774 sym := v.Aux 4775 ptr := v.Args[0] 4776 v_1 := v.Args[1] 4777 if v_1.Op != OpAMD64ADDQconst { 4778 break 4779 } 4780 c := v_1.AuxInt 4781 idx := v_1.Args[0] 4782 mem := v.Args[2] 4783 v.reset(OpAMD64MOVBstoreconstidx1) 4784 v.AuxInt = ValAndOff(x).add(c) 4785 v.Aux = sym 4786 v.AddArg(ptr) 4787 v.AddArg(idx) 4788 v.AddArg(mem) 4789 return true 4790 } 4791 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 4792 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4793 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 4794 for { 4795 c := v.AuxInt 4796 s := v.Aux 4797 p := v.Args[0] 4798 i := v.Args[1] 4799 x := v.Args[2] 4800 if x.Op != OpAMD64MOVBstoreconstidx1 { 4801 break 4802 } 4803 a := x.AuxInt 4804 if x.Aux != s { 4805 break 4806 } 4807 if p != x.Args[0] { 4808 break 4809 } 4810 if i != x.Args[1] { 4811 break 4812 } 4813 mem := x.Args[2] 4814 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4815 break 4816 } 4817 v.reset(OpAMD64MOVWstoreconstidx1) 4818 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4819 v.Aux = s 4820 v.AddArg(p) 4821 v.AddArg(i) 4822 v.AddArg(mem) 4823 return true 4824 } 4825 return false 4826 } 4827 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { 4828 b := v.Block 4829 _ = b 4830 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 4831 // cond: 4832 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4833 for { 4834 c := v.AuxInt 4835 sym := v.Aux 4836 v_0 := v.Args[0] 4837 if v_0.Op != OpAMD64ADDQconst { 4838 break 4839 } 4840 d := v_0.AuxInt 4841 ptr := v_0.Args[0] 4842 idx := v.Args[1] 4843 val := v.Args[2] 4844 mem := v.Args[3] 4845 v.reset(OpAMD64MOVBstoreidx1) 4846 v.AuxInt = c + d 4847 v.Aux 
= sym 4848 v.AddArg(ptr) 4849 v.AddArg(idx) 4850 v.AddArg(val) 4851 v.AddArg(mem) 4852 return true 4853 } 4854 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 4855 // cond: 4856 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4857 for { 4858 c := v.AuxInt 4859 sym := v.Aux 4860 ptr := v.Args[0] 4861 v_1 := v.Args[1] 4862 if v_1.Op != OpAMD64ADDQconst { 4863 break 4864 } 4865 d := v_1.AuxInt 4866 idx := v_1.Args[0] 4867 val := v.Args[2] 4868 mem := v.Args[3] 4869 v.reset(OpAMD64MOVBstoreidx1) 4870 v.AuxInt = c + d 4871 v.Aux = sym 4872 v.AddArg(ptr) 4873 v.AddArg(idx) 4874 v.AddArg(val) 4875 v.AddArg(mem) 4876 return true 4877 } 4878 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 4879 // cond: x.Uses == 1 && clobber(x) 4880 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 4881 for { 4882 i := v.AuxInt 4883 s := v.Aux 4884 p := v.Args[0] 4885 idx := v.Args[1] 4886 v_2 := v.Args[2] 4887 if v_2.Op != OpAMD64SHRQconst { 4888 break 4889 } 4890 if v_2.AuxInt != 8 { 4891 break 4892 } 4893 w := v_2.Args[0] 4894 x := v.Args[3] 4895 if x.Op != OpAMD64MOVBstoreidx1 { 4896 break 4897 } 4898 if x.AuxInt != i-1 { 4899 break 4900 } 4901 if x.Aux != s { 4902 break 4903 } 4904 if p != x.Args[0] { 4905 break 4906 } 4907 if idx != x.Args[1] { 4908 break 4909 } 4910 if w != x.Args[2] { 4911 break 4912 } 4913 mem := x.Args[3] 4914 if !(x.Uses == 1 && clobber(x)) { 4915 break 4916 } 4917 v.reset(OpAMD64MOVWstoreidx1) 4918 v.AuxInt = i - 1 4919 v.Aux = s 4920 v.AddArg(p) 4921 v.AddArg(idx) 4922 v.AddArg(w) 4923 v.AddArg(mem) 4924 return true 4925 } 4926 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 4927 // cond: x.Uses == 1 && clobber(x) 4928 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 4929 for { 4930 i := v.AuxInt 4931 s := v.Aux 4932 p := v.Args[0] 4933 idx := v.Args[1] 4934 v_2 := v.Args[2] 4935 if v_2.Op != OpAMD64SHRQconst { 4936 break 4937 } 4938 j := v_2.AuxInt 4939 w := v_2.Args[0] 4940 x := v.Args[3] 4941 if x.Op != OpAMD64MOVBstoreidx1 { 4942 break 4943 } 4944 if x.AuxInt != i-1 { 4945 break 4946 } 4947 if x.Aux != s { 4948 break 4949 } 4950 if p != x.Args[0] { 4951 break 4952 } 4953 if idx != x.Args[1] { 4954 break 4955 } 4956 w0 := x.Args[2] 4957 if w0.Op != OpAMD64SHRQconst { 4958 break 4959 } 4960 if w0.AuxInt != j-8 { 4961 break 4962 } 4963 if w != w0.Args[0] { 4964 break 4965 } 4966 mem := x.Args[3] 4967 if !(x.Uses == 1 && clobber(x)) { 4968 break 4969 } 4970 v.reset(OpAMD64MOVWstoreidx1) 4971 v.AuxInt = i - 1 4972 v.Aux = s 4973 v.AddArg(p) 4974 v.AddArg(idx) 4975 v.AddArg(w0) 4976 v.AddArg(mem) 4977 return true 4978 } 4979 return false 4980 } 4981 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { 4982 b := v.Block 4983 _ = b 4984 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 4985 // cond: x.Uses == 1 && clobber(x) 4986 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 4987 for { 4988 x := v.Args[0] 4989 if x.Op != OpAMD64MOVLload { 4990 break 4991 } 4992 off := x.AuxInt 4993 sym := x.Aux 4994 ptr := x.Args[0] 4995 mem := x.Args[1] 4996 if !(x.Uses == 1 && clobber(x)) { 4997 break 4998 } 4999 b = x.Block 5000 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 5001 v.reset(OpCopy) 5002 v.AddArg(v0) 5003 v0.AuxInt = off 5004 v0.Aux = sym 5005 v0.AddArg(ptr) 5006 v0.AddArg(mem) 5007 return true 5008 } 5009 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 5010 // cond: x.Uses == 1 && clobber(x) 5011 
// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 5012 for { 5013 x := v.Args[0] 5014 if x.Op != OpAMD64MOVQload { 5015 break 5016 } 5017 off := x.AuxInt 5018 sym := x.Aux 5019 ptr := x.Args[0] 5020 mem := x.Args[1] 5021 if !(x.Uses == 1 && clobber(x)) { 5022 break 5023 } 5024 b = x.Block 5025 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 5026 v.reset(OpCopy) 5027 v.AddArg(v0) 5028 v0.AuxInt = off 5029 v0.Aux = sym 5030 v0.AddArg(ptr) 5031 v0.AddArg(mem) 5032 return true 5033 } 5034 // match: (MOVLQSX (ANDLconst [c] x)) 5035 // cond: c & 0x80000000 == 0 5036 // result: (ANDLconst [c & 0x7fffffff] x) 5037 for { 5038 v_0 := v.Args[0] 5039 if v_0.Op != OpAMD64ANDLconst { 5040 break 5041 } 5042 c := v_0.AuxInt 5043 x := v_0.Args[0] 5044 if !(c&0x80000000 == 0) { 5045 break 5046 } 5047 v.reset(OpAMD64ANDLconst) 5048 v.AuxInt = c & 0x7fffffff 5049 v.AddArg(x) 5050 return true 5051 } 5052 return false 5053 } 5054 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool { 5055 b := v.Block 5056 _ = b 5057 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 5058 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5059 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5060 for { 5061 off1 := v.AuxInt 5062 sym1 := v.Aux 5063 v_0 := v.Args[0] 5064 if v_0.Op != OpAMD64LEAQ { 5065 break 5066 } 5067 off2 := v_0.AuxInt 5068 sym2 := v_0.Aux 5069 base := v_0.Args[0] 5070 mem := v.Args[1] 5071 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5072 break 5073 } 5074 v.reset(OpAMD64MOVLQSXload) 5075 v.AuxInt = off1 + off2 5076 v.Aux = mergeSym(sym1, sym2) 5077 v.AddArg(base) 5078 v.AddArg(mem) 5079 return true 5080 } 5081 return false 5082 } 5083 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { 5084 b := v.Block 5085 _ = b 5086 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 5087 // cond: x.Uses == 1 && clobber(x) 5088 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 5089 for { 5090 x := v.Args[0] 5091 if x.Op != OpAMD64MOVLload { 5092 break 5093 } 5094 off := x.AuxInt 5095 sym := x.Aux 5096 ptr := x.Args[0] 5097 mem := x.Args[1] 5098 if !(x.Uses == 1 && clobber(x)) { 5099 break 5100 } 5101 b = x.Block 5102 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 5103 v.reset(OpCopy) 5104 v.AddArg(v0) 5105 v0.AuxInt = off 5106 v0.Aux = sym 5107 v0.AddArg(ptr) 5108 v0.AddArg(mem) 5109 return true 5110 } 5111 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 5112 // cond: x.Uses == 1 && clobber(x) 5113 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 5114 for { 5115 x := v.Args[0] 5116 if x.Op != OpAMD64MOVQload { 5117 break 5118 } 5119 off := x.AuxInt 5120 sym := x.Aux 5121 ptr := x.Args[0] 5122 mem := x.Args[1] 5123 if !(x.Uses == 1 && clobber(x)) { 5124 break 5125 } 5126 b = x.Block 5127 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 5128 v.reset(OpCopy) 5129 v.AddArg(v0) 5130 v0.AuxInt = off 5131 v0.Aux = sym 5132 v0.AddArg(ptr) 5133 v0.AddArg(mem) 5134 return true 5135 } 5136 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 5137 // cond: x.Uses == 1 && clobber(x) 5138 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 5139 for { 5140 x := v.Args[0] 5141 if x.Op != OpAMD64MOVLloadidx1 { 5142 break 5143 } 5144 off := x.AuxInt 5145 sym := x.Aux 5146 ptr := x.Args[0] 5147 idx := x.Args[1] 5148 mem := x.Args[2] 5149 if !(x.Uses == 1 && clobber(x)) { 5150 break 5151 } 5152 b = x.Block 5153 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 5154 
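// Descriptive note on the "@x.Block (...)" result form used by these
// extension-elision rules: the replacement load v0 is created in the block of
// the original load x (b = x.Block above), and v is then reset to an OpCopy of
// v0, so every use of the extension sees the load result directly. For the
// MOVLQZX rules this is sound because a 32-bit load on amd64 already zeroes
// the upper half of the destination register; x.Uses == 1 and clobber(x) in
// the condition let the now-redundant original load be removed.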
v.reset(OpCopy) 5155 v.AddArg(v0) 5156 v0.AuxInt = off 5157 v0.Aux = sym 5158 v0.AddArg(ptr) 5159 v0.AddArg(idx) 5160 v0.AddArg(mem) 5161 return true 5162 } 5163 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 5164 // cond: x.Uses == 1 && clobber(x) 5165 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 5166 for { 5167 x := v.Args[0] 5168 if x.Op != OpAMD64MOVLloadidx4 { 5169 break 5170 } 5171 off := x.AuxInt 5172 sym := x.Aux 5173 ptr := x.Args[0] 5174 idx := x.Args[1] 5175 mem := x.Args[2] 5176 if !(x.Uses == 1 && clobber(x)) { 5177 break 5178 } 5179 b = x.Block 5180 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) 5181 v.reset(OpCopy) 5182 v.AddArg(v0) 5183 v0.AuxInt = off 5184 v0.Aux = sym 5185 v0.AddArg(ptr) 5186 v0.AddArg(idx) 5187 v0.AddArg(mem) 5188 return true 5189 } 5190 // match: (MOVLQZX (ANDLconst [c] x)) 5191 // cond: 5192 // result: (ANDLconst [c] x) 5193 for { 5194 v_0 := v.Args[0] 5195 if v_0.Op != OpAMD64ANDLconst { 5196 break 5197 } 5198 c := v_0.AuxInt 5199 x := v_0.Args[0] 5200 v.reset(OpAMD64ANDLconst) 5201 v.AuxInt = c 5202 v.AddArg(x) 5203 return true 5204 } 5205 return false 5206 } 5207 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool { 5208 b := v.Block 5209 _ = b 5210 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 5211 // cond: is32Bit(off1+off2) 5212 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 5213 for { 5214 off1 := v.AuxInt 5215 sym := v.Aux 5216 v_0 := v.Args[0] 5217 if v_0.Op != OpAMD64ADDQconst { 5218 break 5219 } 5220 off2 := v_0.AuxInt 5221 ptr := v_0.Args[0] 5222 mem := v.Args[1] 5223 if !(is32Bit(off1 + off2)) { 5224 break 5225 } 5226 v.reset(OpAMD64MOVLatomicload) 5227 v.AuxInt = off1 + off2 5228 v.Aux = sym 5229 v.AddArg(ptr) 5230 v.AddArg(mem) 5231 return true 5232 } 5233 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 5234 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5235 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 5236 for { 5237 off1 := v.AuxInt 5238 sym1 := v.Aux 5239 v_0 := v.Args[0] 5240 if v_0.Op != OpAMD64LEAQ { 5241 break 5242 } 5243 off2 := v_0.AuxInt 5244 sym2 := v_0.Aux 5245 ptr := v_0.Args[0] 5246 mem := v.Args[1] 5247 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5248 break 5249 } 5250 v.reset(OpAMD64MOVLatomicload) 5251 v.AuxInt = off1 + off2 5252 v.Aux = mergeSym(sym1, sym2) 5253 v.AddArg(ptr) 5254 v.AddArg(mem) 5255 return true 5256 } 5257 return false 5258 } 5259 func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool { 5260 b := v.Block 5261 _ = b 5262 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 5263 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 5264 // result: x 5265 for { 5266 off := v.AuxInt 5267 sym := v.Aux 5268 ptr := v.Args[0] 5269 v_1 := v.Args[1] 5270 if v_1.Op != OpAMD64MOVLstore { 5271 break 5272 } 5273 off2 := v_1.AuxInt 5274 sym2 := v_1.Aux 5275 ptr2 := v_1.Args[0] 5276 x := v_1.Args[1] 5277 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 5278 break 5279 } 5280 v.reset(OpCopy) 5281 v.Type = x.Type 5282 v.AddArg(x) 5283 return true 5284 } 5285 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 5286 // cond: is32Bit(off1+off2) 5287 // result: (MOVLload [off1+off2] {sym} ptr mem) 5288 for { 5289 off1 := v.AuxInt 5290 sym := v.Aux 5291 v_0 := v.Args[0] 5292 if v_0.Op != OpAMD64ADDQconst { 5293 break 5294 } 5295 off2 := v_0.AuxInt 5296 ptr := v_0.Args[0] 5297 mem := v.Args[1] 5298 if 
!(is32Bit(off1 + off2)) { 5299 break 5300 } 5301 v.reset(OpAMD64MOVLload) 5302 v.AuxInt = off1 + off2 5303 v.Aux = sym 5304 v.AddArg(ptr) 5305 v.AddArg(mem) 5306 return true 5307 } 5308 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 5309 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5310 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5311 for { 5312 off1 := v.AuxInt 5313 sym1 := v.Aux 5314 v_0 := v.Args[0] 5315 if v_0.Op != OpAMD64LEAQ { 5316 break 5317 } 5318 off2 := v_0.AuxInt 5319 sym2 := v_0.Aux 5320 base := v_0.Args[0] 5321 mem := v.Args[1] 5322 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5323 break 5324 } 5325 v.reset(OpAMD64MOVLload) 5326 v.AuxInt = off1 + off2 5327 v.Aux = mergeSym(sym1, sym2) 5328 v.AddArg(base) 5329 v.AddArg(mem) 5330 return true 5331 } 5332 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 5333 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5334 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 5335 for { 5336 off1 := v.AuxInt 5337 sym1 := v.Aux 5338 v_0 := v.Args[0] 5339 if v_0.Op != OpAMD64LEAQ1 { 5340 break 5341 } 5342 off2 := v_0.AuxInt 5343 sym2 := v_0.Aux 5344 ptr := v_0.Args[0] 5345 idx := v_0.Args[1] 5346 mem := v.Args[1] 5347 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5348 break 5349 } 5350 v.reset(OpAMD64MOVLloadidx1) 5351 v.AuxInt = off1 + off2 5352 v.Aux = mergeSym(sym1, sym2) 5353 v.AddArg(ptr) 5354 v.AddArg(idx) 5355 v.AddArg(mem) 5356 return true 5357 } 5358 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 5359 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5360 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 5361 for { 5362 off1 := v.AuxInt 5363 sym1 := v.Aux 5364 v_0 := v.Args[0] 5365 if v_0.Op != OpAMD64LEAQ4 { 5366 break 5367 } 5368 off2 := v_0.AuxInt 5369 sym2 := v_0.Aux 5370 ptr := v_0.Args[0] 5371 idx := v_0.Args[1] 5372 mem := v.Args[1] 5373 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5374 break 5375 } 5376 v.reset(OpAMD64MOVLloadidx4) 5377 v.AuxInt = off1 + off2 5378 v.Aux = mergeSym(sym1, sym2) 5379 v.AddArg(ptr) 5380 v.AddArg(idx) 5381 v.AddArg(mem) 5382 return true 5383 } 5384 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 5385 // cond: ptr.Op != OpSB 5386 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 5387 for { 5388 off := v.AuxInt 5389 sym := v.Aux 5390 v_0 := v.Args[0] 5391 if v_0.Op != OpAMD64ADDQ { 5392 break 5393 } 5394 ptr := v_0.Args[0] 5395 idx := v_0.Args[1] 5396 mem := v.Args[1] 5397 if !(ptr.Op != OpSB) { 5398 break 5399 } 5400 v.reset(OpAMD64MOVLloadidx1) 5401 v.AuxInt = off 5402 v.Aux = sym 5403 v.AddArg(ptr) 5404 v.AddArg(idx) 5405 v.AddArg(mem) 5406 return true 5407 } 5408 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 5409 // cond: canMergeSym(sym1, sym2) 5410 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5411 for { 5412 off1 := v.AuxInt 5413 sym1 := v.Aux 5414 v_0 := v.Args[0] 5415 if v_0.Op != OpAMD64LEAL { 5416 break 5417 } 5418 off2 := v_0.AuxInt 5419 sym2 := v_0.Aux 5420 base := v_0.Args[0] 5421 mem := v.Args[1] 5422 if !(canMergeSym(sym1, sym2)) { 5423 break 5424 } 5425 v.reset(OpAMD64MOVLload) 5426 v.AuxInt = off1 + off2 5427 v.Aux = mergeSym(sym1, sym2) 5428 v.AddArg(base) 5429 v.AddArg(mem) 5430 return true 5431 } 5432 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 5433 // cond: is32Bit(off1+off2) 5434 // result: (MOVLload [off1+off2] {sym} ptr mem) 5435 for { 5436 off1 := 
v.AuxInt 5437 sym := v.Aux 5438 v_0 := v.Args[0] 5439 if v_0.Op != OpAMD64ADDLconst { 5440 break 5441 } 5442 off2 := v_0.AuxInt 5443 ptr := v_0.Args[0] 5444 mem := v.Args[1] 5445 if !(is32Bit(off1 + off2)) { 5446 break 5447 } 5448 v.reset(OpAMD64MOVLload) 5449 v.AuxInt = off1 + off2 5450 v.Aux = sym 5451 v.AddArg(ptr) 5452 v.AddArg(mem) 5453 return true 5454 } 5455 return false 5456 } 5457 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool { 5458 b := v.Block 5459 _ = b 5460 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 5461 // cond: 5462 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 5463 for { 5464 c := v.AuxInt 5465 sym := v.Aux 5466 ptr := v.Args[0] 5467 v_1 := v.Args[1] 5468 if v_1.Op != OpAMD64SHLQconst { 5469 break 5470 } 5471 if v_1.AuxInt != 2 { 5472 break 5473 } 5474 idx := v_1.Args[0] 5475 mem := v.Args[2] 5476 v.reset(OpAMD64MOVLloadidx4) 5477 v.AuxInt = c 5478 v.Aux = sym 5479 v.AddArg(ptr) 5480 v.AddArg(idx) 5481 v.AddArg(mem) 5482 return true 5483 } 5484 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 5485 // cond: 5486 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 5487 for { 5488 c := v.AuxInt 5489 sym := v.Aux 5490 v_0 := v.Args[0] 5491 if v_0.Op != OpAMD64ADDQconst { 5492 break 5493 } 5494 d := v_0.AuxInt 5495 ptr := v_0.Args[0] 5496 idx := v.Args[1] 5497 mem := v.Args[2] 5498 v.reset(OpAMD64MOVLloadidx1) 5499 v.AuxInt = c + d 5500 v.Aux = sym 5501 v.AddArg(ptr) 5502 v.AddArg(idx) 5503 v.AddArg(mem) 5504 return true 5505 } 5506 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 5507 // cond: 5508 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 5509 for { 5510 c := v.AuxInt 5511 sym := v.Aux 5512 ptr := v.Args[0] 5513 v_1 := v.Args[1] 5514 if v_1.Op != OpAMD64ADDQconst { 5515 break 5516 } 5517 d := v_1.AuxInt 5518 idx := v_1.Args[0] 5519 mem := v.Args[2] 5520 v.reset(OpAMD64MOVLloadidx1) 5521 v.AuxInt = c + d 5522 v.Aux = sym 5523 v.AddArg(ptr) 5524 v.AddArg(idx) 5525 v.AddArg(mem) 5526 return true 5527 } 5528 return false 5529 } 5530 func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool { 5531 b := v.Block 5532 _ = b 5533 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 5534 // cond: 5535 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 5536 for { 5537 c := v.AuxInt 5538 sym := v.Aux 5539 v_0 := v.Args[0] 5540 if v_0.Op != OpAMD64ADDQconst { 5541 break 5542 } 5543 d := v_0.AuxInt 5544 ptr := v_0.Args[0] 5545 idx := v.Args[1] 5546 mem := v.Args[2] 5547 v.reset(OpAMD64MOVLloadidx4) 5548 v.AuxInt = c + d 5549 v.Aux = sym 5550 v.AddArg(ptr) 5551 v.AddArg(idx) 5552 v.AddArg(mem) 5553 return true 5554 } 5555 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 5556 // cond: 5557 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 5558 for { 5559 c := v.AuxInt 5560 sym := v.Aux 5561 ptr := v.Args[0] 5562 v_1 := v.Args[1] 5563 if v_1.Op != OpAMD64ADDQconst { 5564 break 5565 } 5566 d := v_1.AuxInt 5567 idx := v_1.Args[0] 5568 mem := v.Args[2] 5569 v.reset(OpAMD64MOVLloadidx4) 5570 v.AuxInt = c + 4*d 5571 v.Aux = sym 5572 v.AddArg(ptr) 5573 v.AddArg(idx) 5574 v.AddArg(mem) 5575 return true 5576 } 5577 return false 5578 } 5579 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool { 5580 b := v.Block 5581 _ = b 5582 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 5583 // cond: 5584 // result: (MOVLstore [off] {sym} ptr x mem) 5585 for { 5586 off := v.AuxInt 5587 sym := v.Aux 5588 ptr := v.Args[0] 5589 v_1 := v.Args[1] 5590 if v_1.Op != 
OpAMD64MOVLQSX { 5591 break 5592 } 5593 x := v_1.Args[0] 5594 mem := v.Args[2] 5595 v.reset(OpAMD64MOVLstore) 5596 v.AuxInt = off 5597 v.Aux = sym 5598 v.AddArg(ptr) 5599 v.AddArg(x) 5600 v.AddArg(mem) 5601 return true 5602 } 5603 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 5604 // cond: 5605 // result: (MOVLstore [off] {sym} ptr x mem) 5606 for { 5607 off := v.AuxInt 5608 sym := v.Aux 5609 ptr := v.Args[0] 5610 v_1 := v.Args[1] 5611 if v_1.Op != OpAMD64MOVLQZX { 5612 break 5613 } 5614 x := v_1.Args[0] 5615 mem := v.Args[2] 5616 v.reset(OpAMD64MOVLstore) 5617 v.AuxInt = off 5618 v.Aux = sym 5619 v.AddArg(ptr) 5620 v.AddArg(x) 5621 v.AddArg(mem) 5622 return true 5623 } 5624 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 5625 // cond: is32Bit(off1+off2) 5626 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 5627 for { 5628 off1 := v.AuxInt 5629 sym := v.Aux 5630 v_0 := v.Args[0] 5631 if v_0.Op != OpAMD64ADDQconst { 5632 break 5633 } 5634 off2 := v_0.AuxInt 5635 ptr := v_0.Args[0] 5636 val := v.Args[1] 5637 mem := v.Args[2] 5638 if !(is32Bit(off1 + off2)) { 5639 break 5640 } 5641 v.reset(OpAMD64MOVLstore) 5642 v.AuxInt = off1 + off2 5643 v.Aux = sym 5644 v.AddArg(ptr) 5645 v.AddArg(val) 5646 v.AddArg(mem) 5647 return true 5648 } 5649 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 5650 // cond: validOff(off) 5651 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 5652 for { 5653 off := v.AuxInt 5654 sym := v.Aux 5655 ptr := v.Args[0] 5656 v_1 := v.Args[1] 5657 if v_1.Op != OpAMD64MOVLconst { 5658 break 5659 } 5660 c := v_1.AuxInt 5661 mem := v.Args[2] 5662 if !(validOff(off)) { 5663 break 5664 } 5665 v.reset(OpAMD64MOVLstoreconst) 5666 v.AuxInt = makeValAndOff(int64(int32(c)), off) 5667 v.Aux = sym 5668 v.AddArg(ptr) 5669 v.AddArg(mem) 5670 return true 5671 } 5672 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5673 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5674 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5675 for { 5676 off1 := v.AuxInt 5677 sym1 := v.Aux 5678 v_0 := v.Args[0] 5679 if v_0.Op != OpAMD64LEAQ { 5680 break 5681 } 5682 off2 := v_0.AuxInt 5683 sym2 := v_0.Aux 5684 base := v_0.Args[0] 5685 val := v.Args[1] 5686 mem := v.Args[2] 5687 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5688 break 5689 } 5690 v.reset(OpAMD64MOVLstore) 5691 v.AuxInt = off1 + off2 5692 v.Aux = mergeSym(sym1, sym2) 5693 v.AddArg(base) 5694 v.AddArg(val) 5695 v.AddArg(mem) 5696 return true 5697 } 5698 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 5699 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5700 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5701 for { 5702 off1 := v.AuxInt 5703 sym1 := v.Aux 5704 v_0 := v.Args[0] 5705 if v_0.Op != OpAMD64LEAQ1 { 5706 break 5707 } 5708 off2 := v_0.AuxInt 5709 sym2 := v_0.Aux 5710 ptr := v_0.Args[0] 5711 idx := v_0.Args[1] 5712 val := v.Args[1] 5713 mem := v.Args[2] 5714 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5715 break 5716 } 5717 v.reset(OpAMD64MOVLstoreidx1) 5718 v.AuxInt = off1 + off2 5719 v.Aux = mergeSym(sym1, sym2) 5720 v.AddArg(ptr) 5721 v.AddArg(idx) 5722 v.AddArg(val) 5723 v.AddArg(mem) 5724 return true 5725 } 5726 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 5727 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5728 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5729 for { 5730 
off1 := v.AuxInt 5731 sym1 := v.Aux 5732 v_0 := v.Args[0] 5733 if v_0.Op != OpAMD64LEAQ4 { 5734 break 5735 } 5736 off2 := v_0.AuxInt 5737 sym2 := v_0.Aux 5738 ptr := v_0.Args[0] 5739 idx := v_0.Args[1] 5740 val := v.Args[1] 5741 mem := v.Args[2] 5742 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5743 break 5744 } 5745 v.reset(OpAMD64MOVLstoreidx4) 5746 v.AuxInt = off1 + off2 5747 v.Aux = mergeSym(sym1, sym2) 5748 v.AddArg(ptr) 5749 v.AddArg(idx) 5750 v.AddArg(val) 5751 v.AddArg(mem) 5752 return true 5753 } 5754 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 5755 // cond: ptr.Op != OpSB 5756 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 5757 for { 5758 off := v.AuxInt 5759 sym := v.Aux 5760 v_0 := v.Args[0] 5761 if v_0.Op != OpAMD64ADDQ { 5762 break 5763 } 5764 ptr := v_0.Args[0] 5765 idx := v_0.Args[1] 5766 val := v.Args[1] 5767 mem := v.Args[2] 5768 if !(ptr.Op != OpSB) { 5769 break 5770 } 5771 v.reset(OpAMD64MOVLstoreidx1) 5772 v.AuxInt = off 5773 v.Aux = sym 5774 v.AddArg(ptr) 5775 v.AddArg(idx) 5776 v.AddArg(val) 5777 v.AddArg(mem) 5778 return true 5779 } 5780 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 5781 // cond: x.Uses == 1 && clobber(x) 5782 // result: (MOVQstore [i-4] {s} p w mem) 5783 for { 5784 i := v.AuxInt 5785 s := v.Aux 5786 p := v.Args[0] 5787 v_1 := v.Args[1] 5788 if v_1.Op != OpAMD64SHRQconst { 5789 break 5790 } 5791 if v_1.AuxInt != 32 { 5792 break 5793 } 5794 w := v_1.Args[0] 5795 x := v.Args[2] 5796 if x.Op != OpAMD64MOVLstore { 5797 break 5798 } 5799 if x.AuxInt != i-4 { 5800 break 5801 } 5802 if x.Aux != s { 5803 break 5804 } 5805 if p != x.Args[0] { 5806 break 5807 } 5808 if w != x.Args[1] { 5809 break 5810 } 5811 mem := x.Args[2] 5812 if !(x.Uses == 1 && clobber(x)) { 5813 break 5814 } 5815 v.reset(OpAMD64MOVQstore) 5816 v.AuxInt = i - 4 5817 v.Aux = s 5818 v.AddArg(p) 5819 v.AddArg(w) 5820 v.AddArg(mem) 5821 return true 5822 } 5823 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 5824 // cond: x.Uses == 1 && clobber(x) 5825 // result: (MOVQstore [i-4] {s} p w0 mem) 5826 for { 5827 i := v.AuxInt 5828 s := v.Aux 5829 p := v.Args[0] 5830 v_1 := v.Args[1] 5831 if v_1.Op != OpAMD64SHRQconst { 5832 break 5833 } 5834 j := v_1.AuxInt 5835 w := v_1.Args[0] 5836 x := v.Args[2] 5837 if x.Op != OpAMD64MOVLstore { 5838 break 5839 } 5840 if x.AuxInt != i-4 { 5841 break 5842 } 5843 if x.Aux != s { 5844 break 5845 } 5846 if p != x.Args[0] { 5847 break 5848 } 5849 w0 := x.Args[1] 5850 if w0.Op != OpAMD64SHRQconst { 5851 break 5852 } 5853 if w0.AuxInt != j-32 { 5854 break 5855 } 5856 if w != w0.Args[0] { 5857 break 5858 } 5859 mem := x.Args[2] 5860 if !(x.Uses == 1 && clobber(x)) { 5861 break 5862 } 5863 v.reset(OpAMD64MOVQstore) 5864 v.AuxInt = i - 4 5865 v.Aux = s 5866 v.AddArg(p) 5867 v.AddArg(w0) 5868 v.AddArg(mem) 5869 return true 5870 } 5871 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 5872 // cond: canMergeSym(sym1, sym2) 5873 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5874 for { 5875 off1 := v.AuxInt 5876 sym1 := v.Aux 5877 v_0 := v.Args[0] 5878 if v_0.Op != OpAMD64LEAL { 5879 break 5880 } 5881 off2 := v_0.AuxInt 5882 sym2 := v_0.Aux 5883 base := v_0.Args[0] 5884 val := v.Args[1] 5885 mem := v.Args[2] 5886 if !(canMergeSym(sym1, sym2)) { 5887 break 5888 } 5889 v.reset(OpAMD64MOVLstore) 5890 v.AuxInt = off1 + off2 5891 v.Aux = mergeSym(sym1, sym2) 5892 v.AddArg(base) 5893 v.AddArg(val) 5894 
v.AddArg(mem) 5895 return true 5896 } 5897 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 5898 // cond: is32Bit(off1+off2) 5899 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 5900 for { 5901 off1 := v.AuxInt 5902 sym := v.Aux 5903 v_0 := v.Args[0] 5904 if v_0.Op != OpAMD64ADDLconst { 5905 break 5906 } 5907 off2 := v_0.AuxInt 5908 ptr := v_0.Args[0] 5909 val := v.Args[1] 5910 mem := v.Args[2] 5911 if !(is32Bit(off1 + off2)) { 5912 break 5913 } 5914 v.reset(OpAMD64MOVLstore) 5915 v.AuxInt = off1 + off2 5916 v.Aux = sym 5917 v.AddArg(ptr) 5918 v.AddArg(val) 5919 v.AddArg(mem) 5920 return true 5921 } 5922 return false 5923 } 5924 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { 5925 b := v.Block 5926 _ = b 5927 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 5928 // cond: ValAndOff(sc).canAdd(off) 5929 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 5930 for { 5931 sc := v.AuxInt 5932 s := v.Aux 5933 v_0 := v.Args[0] 5934 if v_0.Op != OpAMD64ADDQconst { 5935 break 5936 } 5937 off := v_0.AuxInt 5938 ptr := v_0.Args[0] 5939 mem := v.Args[1] 5940 if !(ValAndOff(sc).canAdd(off)) { 5941 break 5942 } 5943 v.reset(OpAMD64MOVLstoreconst) 5944 v.AuxInt = ValAndOff(sc).add(off) 5945 v.Aux = s 5946 v.AddArg(ptr) 5947 v.AddArg(mem) 5948 return true 5949 } 5950 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 5951 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 5952 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 5953 for { 5954 sc := v.AuxInt 5955 sym1 := v.Aux 5956 v_0 := v.Args[0] 5957 if v_0.Op != OpAMD64LEAQ { 5958 break 5959 } 5960 off := v_0.AuxInt 5961 sym2 := v_0.Aux 5962 ptr := v_0.Args[0] 5963 mem := v.Args[1] 5964 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 5965 break 5966 } 5967 v.reset(OpAMD64MOVLstoreconst) 5968 v.AuxInt = ValAndOff(sc).add(off) 5969 v.Aux = mergeSym(sym1, sym2) 5970 v.AddArg(ptr) 5971 v.AddArg(mem) 5972 return true 5973 } 5974 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 5975 // cond: canMergeSym(sym1, sym2) 5976 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 5977 for { 5978 x := v.AuxInt 5979 sym1 := v.Aux 5980 v_0 := v.Args[0] 5981 if v_0.Op != OpAMD64LEAQ1 { 5982 break 5983 } 5984 off := v_0.AuxInt 5985 sym2 := v_0.Aux 5986 ptr := v_0.Args[0] 5987 idx := v_0.Args[1] 5988 mem := v.Args[1] 5989 if !(canMergeSym(sym1, sym2)) { 5990 break 5991 } 5992 v.reset(OpAMD64MOVLstoreconstidx1) 5993 v.AuxInt = ValAndOff(x).add(off) 5994 v.Aux = mergeSym(sym1, sym2) 5995 v.AddArg(ptr) 5996 v.AddArg(idx) 5997 v.AddArg(mem) 5998 return true 5999 } 6000 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 6001 // cond: canMergeSym(sym1, sym2) 6002 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6003 for { 6004 x := v.AuxInt 6005 sym1 := v.Aux 6006 v_0 := v.Args[0] 6007 if v_0.Op != OpAMD64LEAQ4 { 6008 break 6009 } 6010 off := v_0.AuxInt 6011 sym2 := v_0.Aux 6012 ptr := v_0.Args[0] 6013 idx := v_0.Args[1] 6014 mem := v.Args[1] 6015 if !(canMergeSym(sym1, sym2)) { 6016 break 6017 } 6018 v.reset(OpAMD64MOVLstoreconstidx4) 6019 v.AuxInt = ValAndOff(x).add(off) 6020 v.Aux = mergeSym(sym1, sym2) 6021 v.AddArg(ptr) 6022 v.AddArg(idx) 6023 v.AddArg(mem) 6024 return true 6025 } 6026 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 6027 // cond: 6028 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx 
mem) 6029 for { 6030 x := v.AuxInt 6031 sym := v.Aux 6032 v_0 := v.Args[0] 6033 if v_0.Op != OpAMD64ADDQ { 6034 break 6035 } 6036 ptr := v_0.Args[0] 6037 idx := v_0.Args[1] 6038 mem := v.Args[1] 6039 v.reset(OpAMD64MOVLstoreconstidx1) 6040 v.AuxInt = x 6041 v.Aux = sym 6042 v.AddArg(ptr) 6043 v.AddArg(idx) 6044 v.AddArg(mem) 6045 return true 6046 } 6047 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 6048 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 6049 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 6050 for { 6051 c := v.AuxInt 6052 s := v.Aux 6053 p := v.Args[0] 6054 x := v.Args[1] 6055 if x.Op != OpAMD64MOVLstoreconst { 6056 break 6057 } 6058 a := x.AuxInt 6059 if x.Aux != s { 6060 break 6061 } 6062 if p != x.Args[0] { 6063 break 6064 } 6065 mem := x.Args[1] 6066 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 6067 break 6068 } 6069 v.reset(OpAMD64MOVQstore) 6070 v.AuxInt = ValAndOff(a).Off() 6071 v.Aux = s 6072 v.AddArg(p) 6073 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 6074 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 6075 v.AddArg(v0) 6076 v.AddArg(mem) 6077 return true 6078 } 6079 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 6080 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6081 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6082 for { 6083 sc := v.AuxInt 6084 sym1 := v.Aux 6085 v_0 := v.Args[0] 6086 if v_0.Op != OpAMD64LEAL { 6087 break 6088 } 6089 off := v_0.AuxInt 6090 sym2 := v_0.Aux 6091 ptr := v_0.Args[0] 6092 mem := v.Args[1] 6093 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6094 break 6095 } 6096 v.reset(OpAMD64MOVLstoreconst) 6097 v.AuxInt = ValAndOff(sc).add(off) 6098 v.Aux = mergeSym(sym1, sym2) 6099 v.AddArg(ptr) 6100 v.AddArg(mem) 6101 return true 6102 } 6103 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 6104 // cond: ValAndOff(sc).canAdd(off) 6105 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6106 for { 6107 sc := v.AuxInt 6108 s := v.Aux 6109 v_0 := v.Args[0] 6110 if v_0.Op != OpAMD64ADDLconst { 6111 break 6112 } 6113 off := v_0.AuxInt 6114 ptr := v_0.Args[0] 6115 mem := v.Args[1] 6116 if !(ValAndOff(sc).canAdd(off)) { 6117 break 6118 } 6119 v.reset(OpAMD64MOVLstoreconst) 6120 v.AuxInt = ValAndOff(sc).add(off) 6121 v.Aux = s 6122 v.AddArg(ptr) 6123 v.AddArg(mem) 6124 return true 6125 } 6126 return false 6127 } 6128 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool { 6129 b := v.Block 6130 _ = b 6131 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 6132 // cond: 6133 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 6134 for { 6135 c := v.AuxInt 6136 sym := v.Aux 6137 ptr := v.Args[0] 6138 v_1 := v.Args[1] 6139 if v_1.Op != OpAMD64SHLQconst { 6140 break 6141 } 6142 if v_1.AuxInt != 2 { 6143 break 6144 } 6145 idx := v_1.Args[0] 6146 mem := v.Args[2] 6147 v.reset(OpAMD64MOVLstoreconstidx4) 6148 v.AuxInt = c 6149 v.Aux = sym 6150 v.AddArg(ptr) 6151 v.AddArg(idx) 6152 v.AddArg(mem) 6153 return true 6154 } 6155 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 6156 // cond: 6157 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6158 for { 6159 x := v.AuxInt 6160 sym := v.Aux 6161 v_0 := v.Args[0] 6162 if v_0.Op != OpAMD64ADDQconst { 6163 break 
6164 } 6165 c := v_0.AuxInt 6166 ptr := v_0.Args[0] 6167 idx := v.Args[1] 6168 mem := v.Args[2] 6169 v.reset(OpAMD64MOVLstoreconstidx1) 6170 v.AuxInt = ValAndOff(x).add(c) 6171 v.Aux = sym 6172 v.AddArg(ptr) 6173 v.AddArg(idx) 6174 v.AddArg(mem) 6175 return true 6176 } 6177 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 6178 // cond: 6179 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6180 for { 6181 x := v.AuxInt 6182 sym := v.Aux 6183 ptr := v.Args[0] 6184 v_1 := v.Args[1] 6185 if v_1.Op != OpAMD64ADDQconst { 6186 break 6187 } 6188 c := v_1.AuxInt 6189 idx := v_1.Args[0] 6190 mem := v.Args[2] 6191 v.reset(OpAMD64MOVLstoreconstidx1) 6192 v.AuxInt = ValAndOff(x).add(c) 6193 v.Aux = sym 6194 v.AddArg(ptr) 6195 v.AddArg(idx) 6196 v.AddArg(mem) 6197 return true 6198 } 6199 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 6200 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 6201 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 6202 for { 6203 c := v.AuxInt 6204 s := v.Aux 6205 p := v.Args[0] 6206 i := v.Args[1] 6207 x := v.Args[2] 6208 if x.Op != OpAMD64MOVLstoreconstidx1 { 6209 break 6210 } 6211 a := x.AuxInt 6212 if x.Aux != s { 6213 break 6214 } 6215 if p != x.Args[0] { 6216 break 6217 } 6218 if i != x.Args[1] { 6219 break 6220 } 6221 mem := x.Args[2] 6222 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 6223 break 6224 } 6225 v.reset(OpAMD64MOVQstoreidx1) 6226 v.AuxInt = ValAndOff(a).Off() 6227 v.Aux = s 6228 v.AddArg(p) 6229 v.AddArg(i) 6230 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 6231 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 6232 v.AddArg(v0) 6233 v.AddArg(mem) 6234 return true 6235 } 6236 return false 6237 } 6238 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool { 6239 b := v.Block 6240 _ = b 6241 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 6242 // cond: 6243 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6244 for { 6245 x := v.AuxInt 6246 sym := v.Aux 6247 v_0 := v.Args[0] 6248 if v_0.Op != OpAMD64ADDQconst { 6249 break 6250 } 6251 c := v_0.AuxInt 6252 ptr := v_0.Args[0] 6253 idx := v.Args[1] 6254 mem := v.Args[2] 6255 v.reset(OpAMD64MOVLstoreconstidx4) 6256 v.AuxInt = ValAndOff(x).add(c) 6257 v.Aux = sym 6258 v.AddArg(ptr) 6259 v.AddArg(idx) 6260 v.AddArg(mem) 6261 return true 6262 } 6263 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 6264 // cond: 6265 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 6266 for { 6267 x := v.AuxInt 6268 sym := v.Aux 6269 ptr := v.Args[0] 6270 v_1 := v.Args[1] 6271 if v_1.Op != OpAMD64ADDQconst { 6272 break 6273 } 6274 c := v_1.AuxInt 6275 idx := v_1.Args[0] 6276 mem := v.Args[2] 6277 v.reset(OpAMD64MOVLstoreconstidx4) 6278 v.AuxInt = ValAndOff(x).add(4 * c) 6279 v.Aux = sym 6280 v.AddArg(ptr) 6281 v.AddArg(idx) 6282 v.AddArg(mem) 6283 return true 6284 } 6285 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 6286 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 6287 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 6288 for { 6289 c := v.AuxInt 6290 s := v.Aux 6291 p := v.Args[0] 6292 i 
:= v.Args[1] 6293 x := v.Args[2] 6294 if x.Op != OpAMD64MOVLstoreconstidx4 { 6295 break 6296 } 6297 a := x.AuxInt 6298 if x.Aux != s { 6299 break 6300 } 6301 if p != x.Args[0] { 6302 break 6303 } 6304 if i != x.Args[1] { 6305 break 6306 } 6307 mem := x.Args[2] 6308 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 6309 break 6310 } 6311 v.reset(OpAMD64MOVQstoreidx1) 6312 v.AuxInt = ValAndOff(a).Off() 6313 v.Aux = s 6314 v.AddArg(p) 6315 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 6316 v0.AuxInt = 2 6317 v0.AddArg(i) 6318 v.AddArg(v0) 6319 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 6320 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 6321 v.AddArg(v1) 6322 v.AddArg(mem) 6323 return true 6324 } 6325 return false 6326 } 6327 func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool { 6328 b := v.Block 6329 _ = b 6330 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 6331 // cond: 6332 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 6333 for { 6334 c := v.AuxInt 6335 sym := v.Aux 6336 ptr := v.Args[0] 6337 v_1 := v.Args[1] 6338 if v_1.Op != OpAMD64SHLQconst { 6339 break 6340 } 6341 if v_1.AuxInt != 2 { 6342 break 6343 } 6344 idx := v_1.Args[0] 6345 val := v.Args[2] 6346 mem := v.Args[3] 6347 v.reset(OpAMD64MOVLstoreidx4) 6348 v.AuxInt = c 6349 v.Aux = sym 6350 v.AddArg(ptr) 6351 v.AddArg(idx) 6352 v.AddArg(val) 6353 v.AddArg(mem) 6354 return true 6355 } 6356 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6357 // cond: 6358 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 6359 for { 6360 c := v.AuxInt 6361 sym := v.Aux 6362 v_0 := v.Args[0] 6363 if v_0.Op != OpAMD64ADDQconst { 6364 break 6365 } 6366 d := v_0.AuxInt 6367 ptr := v_0.Args[0] 6368 idx := v.Args[1] 6369 val := v.Args[2] 6370 mem := v.Args[3] 6371 v.reset(OpAMD64MOVLstoreidx1) 6372 v.AuxInt = c + d 6373 v.Aux = sym 6374 v.AddArg(ptr) 6375 v.AddArg(idx) 6376 v.AddArg(val) 6377 v.AddArg(mem) 6378 return true 6379 } 6380 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6381 // cond: 6382 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 6383 for { 6384 c := v.AuxInt 6385 sym := v.Aux 6386 ptr := v.Args[0] 6387 v_1 := v.Args[1] 6388 if v_1.Op != OpAMD64ADDQconst { 6389 break 6390 } 6391 d := v_1.AuxInt 6392 idx := v_1.Args[0] 6393 val := v.Args[2] 6394 mem := v.Args[3] 6395 v.reset(OpAMD64MOVLstoreidx1) 6396 v.AuxInt = c + d 6397 v.Aux = sym 6398 v.AddArg(ptr) 6399 v.AddArg(idx) 6400 v.AddArg(val) 6401 v.AddArg(mem) 6402 return true 6403 } 6404 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 6405 // cond: x.Uses == 1 && clobber(x) 6406 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 6407 for { 6408 i := v.AuxInt 6409 s := v.Aux 6410 p := v.Args[0] 6411 idx := v.Args[1] 6412 v_2 := v.Args[2] 6413 if v_2.Op != OpAMD64SHRQconst { 6414 break 6415 } 6416 if v_2.AuxInt != 32 { 6417 break 6418 } 6419 w := v_2.Args[0] 6420 x := v.Args[3] 6421 if x.Op != OpAMD64MOVLstoreidx1 { 6422 break 6423 } 6424 if x.AuxInt != i-4 { 6425 break 6426 } 6427 if x.Aux != s { 6428 break 6429 } 6430 if p != x.Args[0] { 6431 break 6432 } 6433 if idx != x.Args[1] { 6434 break 6435 } 6436 if w != x.Args[2] { 6437 break 6438 } 6439 mem := x.Args[3] 6440 if !(x.Uses == 1 && clobber(x)) { 6441 break 6442 } 6443 v.reset(OpAMD64MOVQstoreidx1) 6444 v.AuxInt = i - 4 6445 v.Aux = s 6446 v.AddArg(p) 6447 v.AddArg(idx) 6448 v.AddArg(w) 6449 v.AddArg(mem) 6450 
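// At this point the pair of adjacent 4-byte stores has been rebuilt as a
// single 8-byte MOVQstoreidx1 at offset i-4: the older store x wrote the low
// 32 bits of w at i-4 and the current store wrote bits 32..63 (SHRQconst [32]
// w) at i, so one quad-word store of w at the lower offset writes the same
// bytes (amd64 is little-endian). x.Uses == 1 ensures the older store's only
// use is as this store's memory argument, and clobber(x) lets it be removed.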
return true 6451 } 6452 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 6453 // cond: x.Uses == 1 && clobber(x) 6454 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 6455 for { 6456 i := v.AuxInt 6457 s := v.Aux 6458 p := v.Args[0] 6459 idx := v.Args[1] 6460 v_2 := v.Args[2] 6461 if v_2.Op != OpAMD64SHRQconst { 6462 break 6463 } 6464 j := v_2.AuxInt 6465 w := v_2.Args[0] 6466 x := v.Args[3] 6467 if x.Op != OpAMD64MOVLstoreidx1 { 6468 break 6469 } 6470 if x.AuxInt != i-4 { 6471 break 6472 } 6473 if x.Aux != s { 6474 break 6475 } 6476 if p != x.Args[0] { 6477 break 6478 } 6479 if idx != x.Args[1] { 6480 break 6481 } 6482 w0 := x.Args[2] 6483 if w0.Op != OpAMD64SHRQconst { 6484 break 6485 } 6486 if w0.AuxInt != j-32 { 6487 break 6488 } 6489 if w != w0.Args[0] { 6490 break 6491 } 6492 mem := x.Args[3] 6493 if !(x.Uses == 1 && clobber(x)) { 6494 break 6495 } 6496 v.reset(OpAMD64MOVQstoreidx1) 6497 v.AuxInt = i - 4 6498 v.Aux = s 6499 v.AddArg(p) 6500 v.AddArg(idx) 6501 v.AddArg(w0) 6502 v.AddArg(mem) 6503 return true 6504 } 6505 return false 6506 } 6507 func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { 6508 b := v.Block 6509 _ = b 6510 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6511 // cond: 6512 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 6513 for { 6514 c := v.AuxInt 6515 sym := v.Aux 6516 v_0 := v.Args[0] 6517 if v_0.Op != OpAMD64ADDQconst { 6518 break 6519 } 6520 d := v_0.AuxInt 6521 ptr := v_0.Args[0] 6522 idx := v.Args[1] 6523 val := v.Args[2] 6524 mem := v.Args[3] 6525 v.reset(OpAMD64MOVLstoreidx4) 6526 v.AuxInt = c + d 6527 v.Aux = sym 6528 v.AddArg(ptr) 6529 v.AddArg(idx) 6530 v.AddArg(val) 6531 v.AddArg(mem) 6532 return true 6533 } 6534 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6535 // cond: 6536 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 6537 for { 6538 c := v.AuxInt 6539 sym := v.Aux 6540 ptr := v.Args[0] 6541 v_1 := v.Args[1] 6542 if v_1.Op != OpAMD64ADDQconst { 6543 break 6544 } 6545 d := v_1.AuxInt 6546 idx := v_1.Args[0] 6547 val := v.Args[2] 6548 mem := v.Args[3] 6549 v.reset(OpAMD64MOVLstoreidx4) 6550 v.AuxInt = c + 4*d 6551 v.Aux = sym 6552 v.AddArg(ptr) 6553 v.AddArg(idx) 6554 v.AddArg(val) 6555 v.AddArg(mem) 6556 return true 6557 } 6558 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 6559 // cond: x.Uses == 1 && clobber(x) 6560 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 6561 for { 6562 i := v.AuxInt 6563 s := v.Aux 6564 p := v.Args[0] 6565 idx := v.Args[1] 6566 v_2 := v.Args[2] 6567 if v_2.Op != OpAMD64SHRQconst { 6568 break 6569 } 6570 if v_2.AuxInt != 32 { 6571 break 6572 } 6573 w := v_2.Args[0] 6574 x := v.Args[3] 6575 if x.Op != OpAMD64MOVLstoreidx4 { 6576 break 6577 } 6578 if x.AuxInt != i-4 { 6579 break 6580 } 6581 if x.Aux != s { 6582 break 6583 } 6584 if p != x.Args[0] { 6585 break 6586 } 6587 if idx != x.Args[1] { 6588 break 6589 } 6590 if w != x.Args[2] { 6591 break 6592 } 6593 mem := x.Args[3] 6594 if !(x.Uses == 1 && clobber(x)) { 6595 break 6596 } 6597 v.reset(OpAMD64MOVQstoreidx1) 6598 v.AuxInt = i - 4 6599 v.Aux = s 6600 v.AddArg(p) 6601 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 6602 v0.AuxInt = 2 6603 v0.AddArg(idx) 6604 v.AddArg(v0) 6605 v.AddArg(w) 6606 v.AddArg(mem) 6607 return true 6608 } 6609 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx 
w0:(SHRQconst [j-32] w) mem)) 6610 // cond: x.Uses == 1 && clobber(x) 6611 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 6612 for { 6613 i := v.AuxInt 6614 s := v.Aux 6615 p := v.Args[0] 6616 idx := v.Args[1] 6617 v_2 := v.Args[2] 6618 if v_2.Op != OpAMD64SHRQconst { 6619 break 6620 } 6621 j := v_2.AuxInt 6622 w := v_2.Args[0] 6623 x := v.Args[3] 6624 if x.Op != OpAMD64MOVLstoreidx4 { 6625 break 6626 } 6627 if x.AuxInt != i-4 { 6628 break 6629 } 6630 if x.Aux != s { 6631 break 6632 } 6633 if p != x.Args[0] { 6634 break 6635 } 6636 if idx != x.Args[1] { 6637 break 6638 } 6639 w0 := x.Args[2] 6640 if w0.Op != OpAMD64SHRQconst { 6641 break 6642 } 6643 if w0.AuxInt != j-32 { 6644 break 6645 } 6646 if w != w0.Args[0] { 6647 break 6648 } 6649 mem := x.Args[3] 6650 if !(x.Uses == 1 && clobber(x)) { 6651 break 6652 } 6653 v.reset(OpAMD64MOVQstoreidx1) 6654 v.AuxInt = i - 4 6655 v.Aux = s 6656 v.AddArg(p) 6657 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 6658 v0.AuxInt = 2 6659 v0.AddArg(idx) 6660 v.AddArg(v0) 6661 v.AddArg(w0) 6662 v.AddArg(mem) 6663 return true 6664 } 6665 return false 6666 } 6667 func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool { 6668 b := v.Block 6669 _ = b 6670 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 6671 // cond: is32Bit(off1+off2) 6672 // result: (MOVOload [off1+off2] {sym} ptr mem) 6673 for { 6674 off1 := v.AuxInt 6675 sym := v.Aux 6676 v_0 := v.Args[0] 6677 if v_0.Op != OpAMD64ADDQconst { 6678 break 6679 } 6680 off2 := v_0.AuxInt 6681 ptr := v_0.Args[0] 6682 mem := v.Args[1] 6683 if !(is32Bit(off1 + off2)) { 6684 break 6685 } 6686 v.reset(OpAMD64MOVOload) 6687 v.AuxInt = off1 + off2 6688 v.Aux = sym 6689 v.AddArg(ptr) 6690 v.AddArg(mem) 6691 return true 6692 } 6693 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6694 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6695 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6696 for { 6697 off1 := v.AuxInt 6698 sym1 := v.Aux 6699 v_0 := v.Args[0] 6700 if v_0.Op != OpAMD64LEAQ { 6701 break 6702 } 6703 off2 := v_0.AuxInt 6704 sym2 := v_0.Aux 6705 base := v_0.Args[0] 6706 mem := v.Args[1] 6707 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6708 break 6709 } 6710 v.reset(OpAMD64MOVOload) 6711 v.AuxInt = off1 + off2 6712 v.Aux = mergeSym(sym1, sym2) 6713 v.AddArg(base) 6714 v.AddArg(mem) 6715 return true 6716 } 6717 return false 6718 } 6719 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool { 6720 b := v.Block 6721 _ = b 6722 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6723 // cond: is32Bit(off1+off2) 6724 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 6725 for { 6726 off1 := v.AuxInt 6727 sym := v.Aux 6728 v_0 := v.Args[0] 6729 if v_0.Op != OpAMD64ADDQconst { 6730 break 6731 } 6732 off2 := v_0.AuxInt 6733 ptr := v_0.Args[0] 6734 val := v.Args[1] 6735 mem := v.Args[2] 6736 if !(is32Bit(off1 + off2)) { 6737 break 6738 } 6739 v.reset(OpAMD64MOVOstore) 6740 v.AuxInt = off1 + off2 6741 v.Aux = sym 6742 v.AddArg(ptr) 6743 v.AddArg(val) 6744 v.AddArg(mem) 6745 return true 6746 } 6747 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6748 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6749 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6750 for { 6751 off1 := v.AuxInt 6752 sym1 := v.Aux 6753 v_0 := v.Args[0] 6754 if v_0.Op != OpAMD64LEAQ { 6755 break 6756 } 6757 off2 := v_0.AuxInt 6758 sym2 := v_0.Aux 6759 base := 
v_0.Args[0] 6760 val := v.Args[1] 6761 mem := v.Args[2] 6762 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6763 break 6764 } 6765 v.reset(OpAMD64MOVOstore) 6766 v.AuxInt = off1 + off2 6767 v.Aux = mergeSym(sym1, sym2) 6768 v.AddArg(base) 6769 v.AddArg(val) 6770 v.AddArg(mem) 6771 return true 6772 } 6773 return false 6774 } 6775 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool { 6776 b := v.Block 6777 _ = b 6778 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 6779 // cond: is32Bit(off1+off2) 6780 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 6781 for { 6782 off1 := v.AuxInt 6783 sym := v.Aux 6784 v_0 := v.Args[0] 6785 if v_0.Op != OpAMD64ADDQconst { 6786 break 6787 } 6788 off2 := v_0.AuxInt 6789 ptr := v_0.Args[0] 6790 mem := v.Args[1] 6791 if !(is32Bit(off1 + off2)) { 6792 break 6793 } 6794 v.reset(OpAMD64MOVQatomicload) 6795 v.AuxInt = off1 + off2 6796 v.Aux = sym 6797 v.AddArg(ptr) 6798 v.AddArg(mem) 6799 return true 6800 } 6801 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 6802 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6803 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 6804 for { 6805 off1 := v.AuxInt 6806 sym1 := v.Aux 6807 v_0 := v.Args[0] 6808 if v_0.Op != OpAMD64LEAQ { 6809 break 6810 } 6811 off2 := v_0.AuxInt 6812 sym2 := v_0.Aux 6813 ptr := v_0.Args[0] 6814 mem := v.Args[1] 6815 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6816 break 6817 } 6818 v.reset(OpAMD64MOVQatomicload) 6819 v.AuxInt = off1 + off2 6820 v.Aux = mergeSym(sym1, sym2) 6821 v.AddArg(ptr) 6822 v.AddArg(mem) 6823 return true 6824 } 6825 return false 6826 } 6827 func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool { 6828 b := v.Block 6829 _ = b 6830 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 6831 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 6832 // result: x 6833 for { 6834 off := v.AuxInt 6835 sym := v.Aux 6836 ptr := v.Args[0] 6837 v_1 := v.Args[1] 6838 if v_1.Op != OpAMD64MOVQstore { 6839 break 6840 } 6841 off2 := v_1.AuxInt 6842 sym2 := v_1.Aux 6843 ptr2 := v_1.Args[0] 6844 x := v_1.Args[1] 6845 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 6846 break 6847 } 6848 v.reset(OpCopy) 6849 v.Type = x.Type 6850 v.AddArg(x) 6851 return true 6852 } 6853 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 6854 // cond: is32Bit(off1+off2) 6855 // result: (MOVQload [off1+off2] {sym} ptr mem) 6856 for { 6857 off1 := v.AuxInt 6858 sym := v.Aux 6859 v_0 := v.Args[0] 6860 if v_0.Op != OpAMD64ADDQconst { 6861 break 6862 } 6863 off2 := v_0.AuxInt 6864 ptr := v_0.Args[0] 6865 mem := v.Args[1] 6866 if !(is32Bit(off1 + off2)) { 6867 break 6868 } 6869 v.reset(OpAMD64MOVQload) 6870 v.AuxInt = off1 + off2 6871 v.Aux = sym 6872 v.AddArg(ptr) 6873 v.AddArg(mem) 6874 return true 6875 } 6876 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 6877 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6878 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6879 for { 6880 off1 := v.AuxInt 6881 sym1 := v.Aux 6882 v_0 := v.Args[0] 6883 if v_0.Op != OpAMD64LEAQ { 6884 break 6885 } 6886 off2 := v_0.AuxInt 6887 sym2 := v_0.Aux 6888 base := v_0.Args[0] 6889 mem := v.Args[1] 6890 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6891 break 6892 } 6893 v.reset(OpAMD64MOVQload) 6894 v.AuxInt = off1 + off2 6895 v.Aux = mergeSym(sym1, sym2) 6896 v.AddArg(base) 6897 v.AddArg(mem) 6898 return true 6899 } 6900 
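// The remaining MOVQload rules follow the same address-folding pattern as the
// narrower loads above: a constant add or LEA feeding the pointer operand is
// absorbed into the load's offset, merging symbols where a LEAQ/LEAL supplies
// one, guarded by is32Bit on the combined offset and canMergeSym on the
// symbols; the scaled LEAQ1/LEAQ8 forms and a plain ADDQ become the
// corresponding indexed loads. For example,
// (MOVQload [8] {sym} (ADDQconst [16] p) mem) becomes
// (MOVQload [24] {sym} p mem), since is32Bit(8+16) holds.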
// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 6901 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6902 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6903 for { 6904 off1 := v.AuxInt 6905 sym1 := v.Aux 6906 v_0 := v.Args[0] 6907 if v_0.Op != OpAMD64LEAQ1 { 6908 break 6909 } 6910 off2 := v_0.AuxInt 6911 sym2 := v_0.Aux 6912 ptr := v_0.Args[0] 6913 idx := v_0.Args[1] 6914 mem := v.Args[1] 6915 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6916 break 6917 } 6918 v.reset(OpAMD64MOVQloadidx1) 6919 v.AuxInt = off1 + off2 6920 v.Aux = mergeSym(sym1, sym2) 6921 v.AddArg(ptr) 6922 v.AddArg(idx) 6923 v.AddArg(mem) 6924 return true 6925 } 6926 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 6927 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6928 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 6929 for { 6930 off1 := v.AuxInt 6931 sym1 := v.Aux 6932 v_0 := v.Args[0] 6933 if v_0.Op != OpAMD64LEAQ8 { 6934 break 6935 } 6936 off2 := v_0.AuxInt 6937 sym2 := v_0.Aux 6938 ptr := v_0.Args[0] 6939 idx := v_0.Args[1] 6940 mem := v.Args[1] 6941 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6942 break 6943 } 6944 v.reset(OpAMD64MOVQloadidx8) 6945 v.AuxInt = off1 + off2 6946 v.Aux = mergeSym(sym1, sym2) 6947 v.AddArg(ptr) 6948 v.AddArg(idx) 6949 v.AddArg(mem) 6950 return true 6951 } 6952 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 6953 // cond: ptr.Op != OpSB 6954 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 6955 for { 6956 off := v.AuxInt 6957 sym := v.Aux 6958 v_0 := v.Args[0] 6959 if v_0.Op != OpAMD64ADDQ { 6960 break 6961 } 6962 ptr := v_0.Args[0] 6963 idx := v_0.Args[1] 6964 mem := v.Args[1] 6965 if !(ptr.Op != OpSB) { 6966 break 6967 } 6968 v.reset(OpAMD64MOVQloadidx1) 6969 v.AuxInt = off 6970 v.Aux = sym 6971 v.AddArg(ptr) 6972 v.AddArg(idx) 6973 v.AddArg(mem) 6974 return true 6975 } 6976 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 6977 // cond: canMergeSym(sym1, sym2) 6978 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 6979 for { 6980 off1 := v.AuxInt 6981 sym1 := v.Aux 6982 v_0 := v.Args[0] 6983 if v_0.Op != OpAMD64LEAL { 6984 break 6985 } 6986 off2 := v_0.AuxInt 6987 sym2 := v_0.Aux 6988 base := v_0.Args[0] 6989 mem := v.Args[1] 6990 if !(canMergeSym(sym1, sym2)) { 6991 break 6992 } 6993 v.reset(OpAMD64MOVQload) 6994 v.AuxInt = off1 + off2 6995 v.Aux = mergeSym(sym1, sym2) 6996 v.AddArg(base) 6997 v.AddArg(mem) 6998 return true 6999 } 7000 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 7001 // cond: is32Bit(off1+off2) 7002 // result: (MOVQload [off1+off2] {sym} ptr mem) 7003 for { 7004 off1 := v.AuxInt 7005 sym := v.Aux 7006 v_0 := v.Args[0] 7007 if v_0.Op != OpAMD64ADDLconst { 7008 break 7009 } 7010 off2 := v_0.AuxInt 7011 ptr := v_0.Args[0] 7012 mem := v.Args[1] 7013 if !(is32Bit(off1 + off2)) { 7014 break 7015 } 7016 v.reset(OpAMD64MOVQload) 7017 v.AuxInt = off1 + off2 7018 v.Aux = sym 7019 v.AddArg(ptr) 7020 v.AddArg(mem) 7021 return true 7022 } 7023 return false 7024 } 7025 func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool { 7026 b := v.Block 7027 _ = b 7028 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7029 // cond: 7030 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 7031 for { 7032 c := v.AuxInt 7033 sym := v.Aux 7034 ptr := v.Args[0] 7035 v_1 := v.Args[1] 7036 if v_1.Op != OpAMD64SHLQconst { 7037 break 7038 } 7039 if v_1.AuxInt != 3 { 7040 break 7041 } 7042 idx := 
v_1.Args[0] 7043 mem := v.Args[2] 7044 v.reset(OpAMD64MOVQloadidx8) 7045 v.AuxInt = c 7046 v.Aux = sym 7047 v.AddArg(ptr) 7048 v.AddArg(idx) 7049 v.AddArg(mem) 7050 return true 7051 } 7052 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7053 // cond: 7054 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 7055 for { 7056 c := v.AuxInt 7057 sym := v.Aux 7058 v_0 := v.Args[0] 7059 if v_0.Op != OpAMD64ADDQconst { 7060 break 7061 } 7062 d := v_0.AuxInt 7063 ptr := v_0.Args[0] 7064 idx := v.Args[1] 7065 mem := v.Args[2] 7066 v.reset(OpAMD64MOVQloadidx1) 7067 v.AuxInt = c + d 7068 v.Aux = sym 7069 v.AddArg(ptr) 7070 v.AddArg(idx) 7071 v.AddArg(mem) 7072 return true 7073 } 7074 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7075 // cond: 7076 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 7077 for { 7078 c := v.AuxInt 7079 sym := v.Aux 7080 ptr := v.Args[0] 7081 v_1 := v.Args[1] 7082 if v_1.Op != OpAMD64ADDQconst { 7083 break 7084 } 7085 d := v_1.AuxInt 7086 idx := v_1.Args[0] 7087 mem := v.Args[2] 7088 v.reset(OpAMD64MOVQloadidx1) 7089 v.AuxInt = c + d 7090 v.Aux = sym 7091 v.AddArg(ptr) 7092 v.AddArg(idx) 7093 v.AddArg(mem) 7094 return true 7095 } 7096 return false 7097 } 7098 func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool { 7099 b := v.Block 7100 _ = b 7101 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 7102 // cond: 7103 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 7104 for { 7105 c := v.AuxInt 7106 sym := v.Aux 7107 v_0 := v.Args[0] 7108 if v_0.Op != OpAMD64ADDQconst { 7109 break 7110 } 7111 d := v_0.AuxInt 7112 ptr := v_0.Args[0] 7113 idx := v.Args[1] 7114 mem := v.Args[2] 7115 v.reset(OpAMD64MOVQloadidx8) 7116 v.AuxInt = c + d 7117 v.Aux = sym 7118 v.AddArg(ptr) 7119 v.AddArg(idx) 7120 v.AddArg(mem) 7121 return true 7122 } 7123 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 7124 // cond: 7125 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 7126 for { 7127 c := v.AuxInt 7128 sym := v.Aux 7129 ptr := v.Args[0] 7130 v_1 := v.Args[1] 7131 if v_1.Op != OpAMD64ADDQconst { 7132 break 7133 } 7134 d := v_1.AuxInt 7135 idx := v_1.Args[0] 7136 mem := v.Args[2] 7137 v.reset(OpAMD64MOVQloadidx8) 7138 v.AuxInt = c + 8*d 7139 v.Aux = sym 7140 v.AddArg(ptr) 7141 v.AddArg(idx) 7142 v.AddArg(mem) 7143 return true 7144 } 7145 return false 7146 } 7147 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool { 7148 b := v.Block 7149 _ = b 7150 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7151 // cond: is32Bit(off1+off2) 7152 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 7153 for { 7154 off1 := v.AuxInt 7155 sym := v.Aux 7156 v_0 := v.Args[0] 7157 if v_0.Op != OpAMD64ADDQconst { 7158 break 7159 } 7160 off2 := v_0.AuxInt 7161 ptr := v_0.Args[0] 7162 val := v.Args[1] 7163 mem := v.Args[2] 7164 if !(is32Bit(off1 + off2)) { 7165 break 7166 } 7167 v.reset(OpAMD64MOVQstore) 7168 v.AuxInt = off1 + off2 7169 v.Aux = sym 7170 v.AddArg(ptr) 7171 v.AddArg(val) 7172 v.AddArg(mem) 7173 return true 7174 } 7175 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 7176 // cond: validValAndOff(c,off) 7177 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 7178 for { 7179 off := v.AuxInt 7180 sym := v.Aux 7181 ptr := v.Args[0] 7182 v_1 := v.Args[1] 7183 if v_1.Op != OpAMD64MOVQconst { 7184 break 7185 } 7186 c := v_1.AuxInt 7187 mem := v.Args[2] 7188 if !(validValAndOff(c, off)) { 7189 break 7190 } 7191 v.reset(OpAMD64MOVQstoreconst) 7192 v.AuxInt = makeValAndOff(c, off) 
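// makeValAndOff packs the constant c and the store offset off into the single
// AuxInt of the MOVQstoreconst (a ValAndOff); validValAndOff(c, off) in the
// condition ensures both halves fit. The MOVQstoreconst rules below unpack
// and adjust that pair again via ValAndOff(sc).canAdd(off) and
// ValAndOff(sc).add(off) when further address arithmetic is folded in.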
7193 v.Aux = sym 7194 v.AddArg(ptr) 7195 v.AddArg(mem) 7196 return true 7197 } 7198 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7199 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7200 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7201 for { 7202 off1 := v.AuxInt 7203 sym1 := v.Aux 7204 v_0 := v.Args[0] 7205 if v_0.Op != OpAMD64LEAQ { 7206 break 7207 } 7208 off2 := v_0.AuxInt 7209 sym2 := v_0.Aux 7210 base := v_0.Args[0] 7211 val := v.Args[1] 7212 mem := v.Args[2] 7213 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7214 break 7215 } 7216 v.reset(OpAMD64MOVQstore) 7217 v.AuxInt = off1 + off2 7218 v.Aux = mergeSym(sym1, sym2) 7219 v.AddArg(base) 7220 v.AddArg(val) 7221 v.AddArg(mem) 7222 return true 7223 } 7224 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7225 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7226 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7227 for { 7228 off1 := v.AuxInt 7229 sym1 := v.Aux 7230 v_0 := v.Args[0] 7231 if v_0.Op != OpAMD64LEAQ1 { 7232 break 7233 } 7234 off2 := v_0.AuxInt 7235 sym2 := v_0.Aux 7236 ptr := v_0.Args[0] 7237 idx := v_0.Args[1] 7238 val := v.Args[1] 7239 mem := v.Args[2] 7240 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7241 break 7242 } 7243 v.reset(OpAMD64MOVQstoreidx1) 7244 v.AuxInt = off1 + off2 7245 v.Aux = mergeSym(sym1, sym2) 7246 v.AddArg(ptr) 7247 v.AddArg(idx) 7248 v.AddArg(val) 7249 v.AddArg(mem) 7250 return true 7251 } 7252 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 7253 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7254 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7255 for { 7256 off1 := v.AuxInt 7257 sym1 := v.Aux 7258 v_0 := v.Args[0] 7259 if v_0.Op != OpAMD64LEAQ8 { 7260 break 7261 } 7262 off2 := v_0.AuxInt 7263 sym2 := v_0.Aux 7264 ptr := v_0.Args[0] 7265 idx := v_0.Args[1] 7266 val := v.Args[1] 7267 mem := v.Args[2] 7268 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7269 break 7270 } 7271 v.reset(OpAMD64MOVQstoreidx8) 7272 v.AuxInt = off1 + off2 7273 v.Aux = mergeSym(sym1, sym2) 7274 v.AddArg(ptr) 7275 v.AddArg(idx) 7276 v.AddArg(val) 7277 v.AddArg(mem) 7278 return true 7279 } 7280 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 7281 // cond: ptr.Op != OpSB 7282 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 7283 for { 7284 off := v.AuxInt 7285 sym := v.Aux 7286 v_0 := v.Args[0] 7287 if v_0.Op != OpAMD64ADDQ { 7288 break 7289 } 7290 ptr := v_0.Args[0] 7291 idx := v_0.Args[1] 7292 val := v.Args[1] 7293 mem := v.Args[2] 7294 if !(ptr.Op != OpSB) { 7295 break 7296 } 7297 v.reset(OpAMD64MOVQstoreidx1) 7298 v.AuxInt = off 7299 v.Aux = sym 7300 v.AddArg(ptr) 7301 v.AddArg(idx) 7302 v.AddArg(val) 7303 v.AddArg(mem) 7304 return true 7305 } 7306 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 7307 // cond: canMergeSym(sym1, sym2) 7308 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7309 for { 7310 off1 := v.AuxInt 7311 sym1 := v.Aux 7312 v_0 := v.Args[0] 7313 if v_0.Op != OpAMD64LEAL { 7314 break 7315 } 7316 off2 := v_0.AuxInt 7317 sym2 := v_0.Aux 7318 base := v_0.Args[0] 7319 val := v.Args[1] 7320 mem := v.Args[2] 7321 if !(canMergeSym(sym1, sym2)) { 7322 break 7323 } 7324 v.reset(OpAMD64MOVQstore) 7325 v.AuxInt = off1 + off2 7326 v.Aux = mergeSym(sym1, sym2) 7327 v.AddArg(base) 7328 v.AddArg(val) 7329 v.AddArg(mem) 7330 return true 7331 } 7332 // match: 
(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 7333 // cond: is32Bit(off1+off2) 7334 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 7335 for { 7336 off1 := v.AuxInt 7337 sym := v.Aux 7338 v_0 := v.Args[0] 7339 if v_0.Op != OpAMD64ADDLconst { 7340 break 7341 } 7342 off2 := v_0.AuxInt 7343 ptr := v_0.Args[0] 7344 val := v.Args[1] 7345 mem := v.Args[2] 7346 if !(is32Bit(off1 + off2)) { 7347 break 7348 } 7349 v.reset(OpAMD64MOVQstore) 7350 v.AuxInt = off1 + off2 7351 v.Aux = sym 7352 v.AddArg(ptr) 7353 v.AddArg(val) 7354 v.AddArg(mem) 7355 return true 7356 } 7357 return false 7358 } 7359 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool { 7360 b := v.Block 7361 _ = b 7362 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 7363 // cond: ValAndOff(sc).canAdd(off) 7364 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7365 for { 7366 sc := v.AuxInt 7367 s := v.Aux 7368 v_0 := v.Args[0] 7369 if v_0.Op != OpAMD64ADDQconst { 7370 break 7371 } 7372 off := v_0.AuxInt 7373 ptr := v_0.Args[0] 7374 mem := v.Args[1] 7375 if !(ValAndOff(sc).canAdd(off)) { 7376 break 7377 } 7378 v.reset(OpAMD64MOVQstoreconst) 7379 v.AuxInt = ValAndOff(sc).add(off) 7380 v.Aux = s 7381 v.AddArg(ptr) 7382 v.AddArg(mem) 7383 return true 7384 } 7385 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 7386 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7387 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7388 for { 7389 sc := v.AuxInt 7390 sym1 := v.Aux 7391 v_0 := v.Args[0] 7392 if v_0.Op != OpAMD64LEAQ { 7393 break 7394 } 7395 off := v_0.AuxInt 7396 sym2 := v_0.Aux 7397 ptr := v_0.Args[0] 7398 mem := v.Args[1] 7399 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7400 break 7401 } 7402 v.reset(OpAMD64MOVQstoreconst) 7403 v.AuxInt = ValAndOff(sc).add(off) 7404 v.Aux = mergeSym(sym1, sym2) 7405 v.AddArg(ptr) 7406 v.AddArg(mem) 7407 return true 7408 } 7409 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 7410 // cond: canMergeSym(sym1, sym2) 7411 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7412 for { 7413 x := v.AuxInt 7414 sym1 := v.Aux 7415 v_0 := v.Args[0] 7416 if v_0.Op != OpAMD64LEAQ1 { 7417 break 7418 } 7419 off := v_0.AuxInt 7420 sym2 := v_0.Aux 7421 ptr := v_0.Args[0] 7422 idx := v_0.Args[1] 7423 mem := v.Args[1] 7424 if !(canMergeSym(sym1, sym2)) { 7425 break 7426 } 7427 v.reset(OpAMD64MOVQstoreconstidx1) 7428 v.AuxInt = ValAndOff(x).add(off) 7429 v.Aux = mergeSym(sym1, sym2) 7430 v.AddArg(ptr) 7431 v.AddArg(idx) 7432 v.AddArg(mem) 7433 return true 7434 } 7435 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 7436 // cond: canMergeSym(sym1, sym2) 7437 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7438 for { 7439 x := v.AuxInt 7440 sym1 := v.Aux 7441 v_0 := v.Args[0] 7442 if v_0.Op != OpAMD64LEAQ8 { 7443 break 7444 } 7445 off := v_0.AuxInt 7446 sym2 := v_0.Aux 7447 ptr := v_0.Args[0] 7448 idx := v_0.Args[1] 7449 mem := v.Args[1] 7450 if !(canMergeSym(sym1, sym2)) { 7451 break 7452 } 7453 v.reset(OpAMD64MOVQstoreconstidx8) 7454 v.AuxInt = ValAndOff(x).add(off) 7455 v.Aux = mergeSym(sym1, sym2) 7456 v.AddArg(ptr) 7457 v.AddArg(idx) 7458 v.AddArg(mem) 7459 return true 7460 } 7461 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 7462 // cond: 7463 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 7464 for { 7465 x := v.AuxInt 7466 sym := v.Aux 
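// When the address is a plain ptr+idx sum (ADDQ), the add is absorbed into the 1-byte-scale indexed form MOVQstoreconstidx1, letting the addressing mode perform the addition.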
7467 v_0 := v.Args[0] 7468 if v_0.Op != OpAMD64ADDQ { 7469 break 7470 } 7471 ptr := v_0.Args[0] 7472 idx := v_0.Args[1] 7473 mem := v.Args[1] 7474 v.reset(OpAMD64MOVQstoreconstidx1) 7475 v.AuxInt = x 7476 v.Aux = sym 7477 v.AddArg(ptr) 7478 v.AddArg(idx) 7479 v.AddArg(mem) 7480 return true 7481 } 7482 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 7483 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7484 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7485 for { 7486 sc := v.AuxInt 7487 sym1 := v.Aux 7488 v_0 := v.Args[0] 7489 if v_0.Op != OpAMD64LEAL { 7490 break 7491 } 7492 off := v_0.AuxInt 7493 sym2 := v_0.Aux 7494 ptr := v_0.Args[0] 7495 mem := v.Args[1] 7496 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7497 break 7498 } 7499 v.reset(OpAMD64MOVQstoreconst) 7500 v.AuxInt = ValAndOff(sc).add(off) 7501 v.Aux = mergeSym(sym1, sym2) 7502 v.AddArg(ptr) 7503 v.AddArg(mem) 7504 return true 7505 } 7506 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 7507 // cond: ValAndOff(sc).canAdd(off) 7508 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7509 for { 7510 sc := v.AuxInt 7511 s := v.Aux 7512 v_0 := v.Args[0] 7513 if v_0.Op != OpAMD64ADDLconst { 7514 break 7515 } 7516 off := v_0.AuxInt 7517 ptr := v_0.Args[0] 7518 mem := v.Args[1] 7519 if !(ValAndOff(sc).canAdd(off)) { 7520 break 7521 } 7522 v.reset(OpAMD64MOVQstoreconst) 7523 v.AuxInt = ValAndOff(sc).add(off) 7524 v.Aux = s 7525 v.AddArg(ptr) 7526 v.AddArg(mem) 7527 return true 7528 } 7529 return false 7530 } 7531 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool { 7532 b := v.Block 7533 _ = b 7534 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7535 // cond: 7536 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 7537 for { 7538 c := v.AuxInt 7539 sym := v.Aux 7540 ptr := v.Args[0] 7541 v_1 := v.Args[1] 7542 if v_1.Op != OpAMD64SHLQconst { 7543 break 7544 } 7545 if v_1.AuxInt != 3 { 7546 break 7547 } 7548 idx := v_1.Args[0] 7549 mem := v.Args[2] 7550 v.reset(OpAMD64MOVQstoreconstidx8) 7551 v.AuxInt = c 7552 v.Aux = sym 7553 v.AddArg(ptr) 7554 v.AddArg(idx) 7555 v.AddArg(mem) 7556 return true 7557 } 7558 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 7559 // cond: 7560 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7561 for { 7562 x := v.AuxInt 7563 sym := v.Aux 7564 v_0 := v.Args[0] 7565 if v_0.Op != OpAMD64ADDQconst { 7566 break 7567 } 7568 c := v_0.AuxInt 7569 ptr := v_0.Args[0] 7570 idx := v.Args[1] 7571 mem := v.Args[2] 7572 v.reset(OpAMD64MOVQstoreconstidx1) 7573 v.AuxInt = ValAndOff(x).add(c) 7574 v.Aux = sym 7575 v.AddArg(ptr) 7576 v.AddArg(idx) 7577 v.AddArg(mem) 7578 return true 7579 } 7580 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 7581 // cond: 7582 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7583 for { 7584 x := v.AuxInt 7585 sym := v.Aux 7586 ptr := v.Args[0] 7587 v_1 := v.Args[1] 7588 if v_1.Op != OpAMD64ADDQconst { 7589 break 7590 } 7591 c := v_1.AuxInt 7592 idx := v_1.Args[0] 7593 mem := v.Args[2] 7594 v.reset(OpAMD64MOVQstoreconstidx1) 7595 v.AuxInt = ValAndOff(x).add(c) 7596 v.Aux = sym 7597 v.AddArg(ptr) 7598 v.AddArg(idx) 7599 v.AddArg(mem) 7600 return true 7601 } 7602 return false 7603 } 7604 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool { 7605 b := v.Block 7606 _ = b 7607 // match: (MOVQstoreconstidx8 [x] {sym} 
(ADDQconst [c] ptr) idx mem) 7608 // cond: 7609 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7610 for { 7611 x := v.AuxInt 7612 sym := v.Aux 7613 v_0 := v.Args[0] 7614 if v_0.Op != OpAMD64ADDQconst { 7615 break 7616 } 7617 c := v_0.AuxInt 7618 ptr := v_0.Args[0] 7619 idx := v.Args[1] 7620 mem := v.Args[2] 7621 v.reset(OpAMD64MOVQstoreconstidx8) 7622 v.AuxInt = ValAndOff(x).add(c) 7623 v.Aux = sym 7624 v.AddArg(ptr) 7625 v.AddArg(idx) 7626 v.AddArg(mem) 7627 return true 7628 } 7629 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 7630 // cond: 7631 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 7632 for { 7633 x := v.AuxInt 7634 sym := v.Aux 7635 ptr := v.Args[0] 7636 v_1 := v.Args[1] 7637 if v_1.Op != OpAMD64ADDQconst { 7638 break 7639 } 7640 c := v_1.AuxInt 7641 idx := v_1.Args[0] 7642 mem := v.Args[2] 7643 v.reset(OpAMD64MOVQstoreconstidx8) 7644 v.AuxInt = ValAndOff(x).add(8 * c) 7645 v.Aux = sym 7646 v.AddArg(ptr) 7647 v.AddArg(idx) 7648 v.AddArg(mem) 7649 return true 7650 } 7651 return false 7652 } 7653 func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool { 7654 b := v.Block 7655 _ = b 7656 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 7657 // cond: 7658 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 7659 for { 7660 c := v.AuxInt 7661 sym := v.Aux 7662 ptr := v.Args[0] 7663 v_1 := v.Args[1] 7664 if v_1.Op != OpAMD64SHLQconst { 7665 break 7666 } 7667 if v_1.AuxInt != 3 { 7668 break 7669 } 7670 idx := v_1.Args[0] 7671 val := v.Args[2] 7672 mem := v.Args[3] 7673 v.reset(OpAMD64MOVQstoreidx8) 7674 v.AuxInt = c 7675 v.Aux = sym 7676 v.AddArg(ptr) 7677 v.AddArg(idx) 7678 v.AddArg(val) 7679 v.AddArg(mem) 7680 return true 7681 } 7682 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7683 // cond: 7684 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 7685 for { 7686 c := v.AuxInt 7687 sym := v.Aux 7688 v_0 := v.Args[0] 7689 if v_0.Op != OpAMD64ADDQconst { 7690 break 7691 } 7692 d := v_0.AuxInt 7693 ptr := v_0.Args[0] 7694 idx := v.Args[1] 7695 val := v.Args[2] 7696 mem := v.Args[3] 7697 v.reset(OpAMD64MOVQstoreidx1) 7698 v.AuxInt = c + d 7699 v.Aux = sym 7700 v.AddArg(ptr) 7701 v.AddArg(idx) 7702 v.AddArg(val) 7703 v.AddArg(mem) 7704 return true 7705 } 7706 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7707 // cond: 7708 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 7709 for { 7710 c := v.AuxInt 7711 sym := v.Aux 7712 ptr := v.Args[0] 7713 v_1 := v.Args[1] 7714 if v_1.Op != OpAMD64ADDQconst { 7715 break 7716 } 7717 d := v_1.AuxInt 7718 idx := v_1.Args[0] 7719 val := v.Args[2] 7720 mem := v.Args[3] 7721 v.reset(OpAMD64MOVQstoreidx1) 7722 v.AuxInt = c + d 7723 v.Aux = sym 7724 v.AddArg(ptr) 7725 v.AddArg(idx) 7726 v.AddArg(val) 7727 v.AddArg(mem) 7728 return true 7729 } 7730 return false 7731 } 7732 func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool { 7733 b := v.Block 7734 _ = b 7735 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7736 // cond: 7737 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 7738 for { 7739 c := v.AuxInt 7740 sym := v.Aux 7741 v_0 := v.Args[0] 7742 if v_0.Op != OpAMD64ADDQconst { 7743 break 7744 } 7745 d := v_0.AuxInt 7746 ptr := v_0.Args[0] 7747 idx := v.Args[1] 7748 val := v.Args[2] 7749 mem := v.Args[3] 7750 v.reset(OpAMD64MOVQstoreidx8) 7751 v.AuxInt = c + d 7752 v.Aux = sym 7753 v.AddArg(ptr) 7754 v.AddArg(idx) 7755 v.AddArg(val) 7756 
v.AddArg(mem) 7757 return true 7758 } 7759 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7760 // cond: 7761 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 7762 for { 7763 c := v.AuxInt 7764 sym := v.Aux 7765 ptr := v.Args[0] 7766 v_1 := v.Args[1] 7767 if v_1.Op != OpAMD64ADDQconst { 7768 break 7769 } 7770 d := v_1.AuxInt 7771 idx := v_1.Args[0] 7772 val := v.Args[2] 7773 mem := v.Args[3] 7774 v.reset(OpAMD64MOVQstoreidx8) 7775 v.AuxInt = c + 8*d 7776 v.Aux = sym 7777 v.AddArg(ptr) 7778 v.AddArg(idx) 7779 v.AddArg(val) 7780 v.AddArg(mem) 7781 return true 7782 } 7783 return false 7784 } 7785 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool { 7786 b := v.Block 7787 _ = b 7788 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 7789 // cond: is32Bit(off1+off2) 7790 // result: (MOVSDload [off1+off2] {sym} ptr mem) 7791 for { 7792 off1 := v.AuxInt 7793 sym := v.Aux 7794 v_0 := v.Args[0] 7795 if v_0.Op != OpAMD64ADDQconst { 7796 break 7797 } 7798 off2 := v_0.AuxInt 7799 ptr := v_0.Args[0] 7800 mem := v.Args[1] 7801 if !(is32Bit(off1 + off2)) { 7802 break 7803 } 7804 v.reset(OpAMD64MOVSDload) 7805 v.AuxInt = off1 + off2 7806 v.Aux = sym 7807 v.AddArg(ptr) 7808 v.AddArg(mem) 7809 return true 7810 } 7811 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7812 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7813 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7814 for { 7815 off1 := v.AuxInt 7816 sym1 := v.Aux 7817 v_0 := v.Args[0] 7818 if v_0.Op != OpAMD64LEAQ { 7819 break 7820 } 7821 off2 := v_0.AuxInt 7822 sym2 := v_0.Aux 7823 base := v_0.Args[0] 7824 mem := v.Args[1] 7825 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7826 break 7827 } 7828 v.reset(OpAMD64MOVSDload) 7829 v.AuxInt = off1 + off2 7830 v.Aux = mergeSym(sym1, sym2) 7831 v.AddArg(base) 7832 v.AddArg(mem) 7833 return true 7834 } 7835 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7836 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7837 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7838 for { 7839 off1 := v.AuxInt 7840 sym1 := v.Aux 7841 v_0 := v.Args[0] 7842 if v_0.Op != OpAMD64LEAQ1 { 7843 break 7844 } 7845 off2 := v_0.AuxInt 7846 sym2 := v_0.Aux 7847 ptr := v_0.Args[0] 7848 idx := v_0.Args[1] 7849 mem := v.Args[1] 7850 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7851 break 7852 } 7853 v.reset(OpAMD64MOVSDloadidx1) 7854 v.AuxInt = off1 + off2 7855 v.Aux = mergeSym(sym1, sym2) 7856 v.AddArg(ptr) 7857 v.AddArg(idx) 7858 v.AddArg(mem) 7859 return true 7860 } 7861 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 7862 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7863 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7864 for { 7865 off1 := v.AuxInt 7866 sym1 := v.Aux 7867 v_0 := v.Args[0] 7868 if v_0.Op != OpAMD64LEAQ8 { 7869 break 7870 } 7871 off2 := v_0.AuxInt 7872 sym2 := v_0.Aux 7873 ptr := v_0.Args[0] 7874 idx := v_0.Args[1] 7875 mem := v.Args[1] 7876 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7877 break 7878 } 7879 v.reset(OpAMD64MOVSDloadidx8) 7880 v.AuxInt = off1 + off2 7881 v.Aux = mergeSym(sym1, sym2) 7882 v.AddArg(ptr) 7883 v.AddArg(idx) 7884 v.AddArg(mem) 7885 return true 7886 } 7887 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 7888 // cond: ptr.Op != OpSB 7889 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 7890 for { 7891 off := v.AuxInt 7892 sym := v.Aux 7893 v_0 := v.Args[0] 
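// As with the integer loads and stores above, a plain ptr+idx address becomes an indexed load; the condition below keeps the SB pseudo-register (the static-symbol base) out of the indexed addressing mode.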
7894 if v_0.Op != OpAMD64ADDQ { 7895 break 7896 } 7897 ptr := v_0.Args[0] 7898 idx := v_0.Args[1] 7899 mem := v.Args[1] 7900 if !(ptr.Op != OpSB) { 7901 break 7902 } 7903 v.reset(OpAMD64MOVSDloadidx1) 7904 v.AuxInt = off 7905 v.Aux = sym 7906 v.AddArg(ptr) 7907 v.AddArg(idx) 7908 v.AddArg(mem) 7909 return true 7910 } 7911 return false 7912 } 7913 func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool { 7914 b := v.Block 7915 _ = b 7916 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7917 // cond: 7918 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 7919 for { 7920 c := v.AuxInt 7921 sym := v.Aux 7922 ptr := v.Args[0] 7923 v_1 := v.Args[1] 7924 if v_1.Op != OpAMD64SHLQconst { 7925 break 7926 } 7927 if v_1.AuxInt != 3 { 7928 break 7929 } 7930 idx := v_1.Args[0] 7931 mem := v.Args[2] 7932 v.reset(OpAMD64MOVSDloadidx8) 7933 v.AuxInt = c 7934 v.Aux = sym 7935 v.AddArg(ptr) 7936 v.AddArg(idx) 7937 v.AddArg(mem) 7938 return true 7939 } 7940 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7941 // cond: 7942 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 7943 for { 7944 c := v.AuxInt 7945 sym := v.Aux 7946 v_0 := v.Args[0] 7947 if v_0.Op != OpAMD64ADDQconst { 7948 break 7949 } 7950 d := v_0.AuxInt 7951 ptr := v_0.Args[0] 7952 idx := v.Args[1] 7953 mem := v.Args[2] 7954 v.reset(OpAMD64MOVSDloadidx1) 7955 v.AuxInt = c + d 7956 v.Aux = sym 7957 v.AddArg(ptr) 7958 v.AddArg(idx) 7959 v.AddArg(mem) 7960 return true 7961 } 7962 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7963 // cond: 7964 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 7965 for { 7966 c := v.AuxInt 7967 sym := v.Aux 7968 ptr := v.Args[0] 7969 v_1 := v.Args[1] 7970 if v_1.Op != OpAMD64ADDQconst { 7971 break 7972 } 7973 d := v_1.AuxInt 7974 idx := v_1.Args[0] 7975 mem := v.Args[2] 7976 v.reset(OpAMD64MOVSDloadidx1) 7977 v.AuxInt = c + d 7978 v.Aux = sym 7979 v.AddArg(ptr) 7980 v.AddArg(idx) 7981 v.AddArg(mem) 7982 return true 7983 } 7984 return false 7985 } 7986 func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool { 7987 b := v.Block 7988 _ = b 7989 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 7990 // cond: 7991 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 7992 for { 7993 c := v.AuxInt 7994 sym := v.Aux 7995 v_0 := v.Args[0] 7996 if v_0.Op != OpAMD64ADDQconst { 7997 break 7998 } 7999 d := v_0.AuxInt 8000 ptr := v_0.Args[0] 8001 idx := v.Args[1] 8002 mem := v.Args[2] 8003 v.reset(OpAMD64MOVSDloadidx8) 8004 v.AuxInt = c + d 8005 v.Aux = sym 8006 v.AddArg(ptr) 8007 v.AddArg(idx) 8008 v.AddArg(mem) 8009 return true 8010 } 8011 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 8012 // cond: 8013 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 8014 for { 8015 c := v.AuxInt 8016 sym := v.Aux 8017 ptr := v.Args[0] 8018 v_1 := v.Args[1] 8019 if v_1.Op != OpAMD64ADDQconst { 8020 break 8021 } 8022 d := v_1.AuxInt 8023 idx := v_1.Args[0] 8024 mem := v.Args[2] 8025 v.reset(OpAMD64MOVSDloadidx8) 8026 v.AuxInt = c + 8*d 8027 v.Aux = sym 8028 v.AddArg(ptr) 8029 v.AddArg(idx) 8030 v.AddArg(mem) 8031 return true 8032 } 8033 return false 8034 } 8035 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool { 8036 b := v.Block 8037 _ = b 8038 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8039 // cond: is32Bit(off1+off2) 8040 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 8041 for { 8042 off1 := v.AuxInt 8043 sym := v.Aux 8044 v_0 := v.Args[0] 8045 if v_0.Op != 
OpAMD64ADDQconst { 8046 break 8047 } 8048 off2 := v_0.AuxInt 8049 ptr := v_0.Args[0] 8050 val := v.Args[1] 8051 mem := v.Args[2] 8052 if !(is32Bit(off1 + off2)) { 8053 break 8054 } 8055 v.reset(OpAMD64MOVSDstore) 8056 v.AuxInt = off1 + off2 8057 v.Aux = sym 8058 v.AddArg(ptr) 8059 v.AddArg(val) 8060 v.AddArg(mem) 8061 return true 8062 } 8063 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8064 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8065 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8066 for { 8067 off1 := v.AuxInt 8068 sym1 := v.Aux 8069 v_0 := v.Args[0] 8070 if v_0.Op != OpAMD64LEAQ { 8071 break 8072 } 8073 off2 := v_0.AuxInt 8074 sym2 := v_0.Aux 8075 base := v_0.Args[0] 8076 val := v.Args[1] 8077 mem := v.Args[2] 8078 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8079 break 8080 } 8081 v.reset(OpAMD64MOVSDstore) 8082 v.AuxInt = off1 + off2 8083 v.Aux = mergeSym(sym1, sym2) 8084 v.AddArg(base) 8085 v.AddArg(val) 8086 v.AddArg(mem) 8087 return true 8088 } 8089 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8090 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8091 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8092 for { 8093 off1 := v.AuxInt 8094 sym1 := v.Aux 8095 v_0 := v.Args[0] 8096 if v_0.Op != OpAMD64LEAQ1 { 8097 break 8098 } 8099 off2 := v_0.AuxInt 8100 sym2 := v_0.Aux 8101 ptr := v_0.Args[0] 8102 idx := v_0.Args[1] 8103 val := v.Args[1] 8104 mem := v.Args[2] 8105 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8106 break 8107 } 8108 v.reset(OpAMD64MOVSDstoreidx1) 8109 v.AuxInt = off1 + off2 8110 v.Aux = mergeSym(sym1, sym2) 8111 v.AddArg(ptr) 8112 v.AddArg(idx) 8113 v.AddArg(val) 8114 v.AddArg(mem) 8115 return true 8116 } 8117 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 8118 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8119 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8120 for { 8121 off1 := v.AuxInt 8122 sym1 := v.Aux 8123 v_0 := v.Args[0] 8124 if v_0.Op != OpAMD64LEAQ8 { 8125 break 8126 } 8127 off2 := v_0.AuxInt 8128 sym2 := v_0.Aux 8129 ptr := v_0.Args[0] 8130 idx := v_0.Args[1] 8131 val := v.Args[1] 8132 mem := v.Args[2] 8133 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8134 break 8135 } 8136 v.reset(OpAMD64MOVSDstoreidx8) 8137 v.AuxInt = off1 + off2 8138 v.Aux = mergeSym(sym1, sym2) 8139 v.AddArg(ptr) 8140 v.AddArg(idx) 8141 v.AddArg(val) 8142 v.AddArg(mem) 8143 return true 8144 } 8145 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 8146 // cond: ptr.Op != OpSB 8147 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 8148 for { 8149 off := v.AuxInt 8150 sym := v.Aux 8151 v_0 := v.Args[0] 8152 if v_0.Op != OpAMD64ADDQ { 8153 break 8154 } 8155 ptr := v_0.Args[0] 8156 idx := v_0.Args[1] 8157 val := v.Args[1] 8158 mem := v.Args[2] 8159 if !(ptr.Op != OpSB) { 8160 break 8161 } 8162 v.reset(OpAMD64MOVSDstoreidx1) 8163 v.AuxInt = off 8164 v.Aux = sym 8165 v.AddArg(ptr) 8166 v.AddArg(idx) 8167 v.AddArg(val) 8168 v.AddArg(mem) 8169 return true 8170 } 8171 return false 8172 } 8173 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool { 8174 b := v.Block 8175 _ = b 8176 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 8177 // cond: 8178 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 8179 for { 8180 c := v.AuxInt 8181 sym := v.Aux 8182 ptr := v.Args[0] 8183 v_1 := v.Args[1] 8184 if v_1.Op != 
OpAMD64SHLQconst { 8185 break 8186 } 8187 if v_1.AuxInt != 3 { 8188 break 8189 } 8190 idx := v_1.Args[0] 8191 val := v.Args[2] 8192 mem := v.Args[3] 8193 v.reset(OpAMD64MOVSDstoreidx8) 8194 v.AuxInt = c 8195 v.Aux = sym 8196 v.AddArg(ptr) 8197 v.AddArg(idx) 8198 v.AddArg(val) 8199 v.AddArg(mem) 8200 return true 8201 } 8202 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8203 // cond: 8204 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 8205 for { 8206 c := v.AuxInt 8207 sym := v.Aux 8208 v_0 := v.Args[0] 8209 if v_0.Op != OpAMD64ADDQconst { 8210 break 8211 } 8212 d := v_0.AuxInt 8213 ptr := v_0.Args[0] 8214 idx := v.Args[1] 8215 val := v.Args[2] 8216 mem := v.Args[3] 8217 v.reset(OpAMD64MOVSDstoreidx1) 8218 v.AuxInt = c + d 8219 v.Aux = sym 8220 v.AddArg(ptr) 8221 v.AddArg(idx) 8222 v.AddArg(val) 8223 v.AddArg(mem) 8224 return true 8225 } 8226 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8227 // cond: 8228 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 8229 for { 8230 c := v.AuxInt 8231 sym := v.Aux 8232 ptr := v.Args[0] 8233 v_1 := v.Args[1] 8234 if v_1.Op != OpAMD64ADDQconst { 8235 break 8236 } 8237 d := v_1.AuxInt 8238 idx := v_1.Args[0] 8239 val := v.Args[2] 8240 mem := v.Args[3] 8241 v.reset(OpAMD64MOVSDstoreidx1) 8242 v.AuxInt = c + d 8243 v.Aux = sym 8244 v.AddArg(ptr) 8245 v.AddArg(idx) 8246 v.AddArg(val) 8247 v.AddArg(mem) 8248 return true 8249 } 8250 return false 8251 } 8252 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool { 8253 b := v.Block 8254 _ = b 8255 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8256 // cond: 8257 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 8258 for { 8259 c := v.AuxInt 8260 sym := v.Aux 8261 v_0 := v.Args[0] 8262 if v_0.Op != OpAMD64ADDQconst { 8263 break 8264 } 8265 d := v_0.AuxInt 8266 ptr := v_0.Args[0] 8267 idx := v.Args[1] 8268 val := v.Args[2] 8269 mem := v.Args[3] 8270 v.reset(OpAMD64MOVSDstoreidx8) 8271 v.AuxInt = c + d 8272 v.Aux = sym 8273 v.AddArg(ptr) 8274 v.AddArg(idx) 8275 v.AddArg(val) 8276 v.AddArg(mem) 8277 return true 8278 } 8279 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8280 // cond: 8281 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 8282 for { 8283 c := v.AuxInt 8284 sym := v.Aux 8285 ptr := v.Args[0] 8286 v_1 := v.Args[1] 8287 if v_1.Op != OpAMD64ADDQconst { 8288 break 8289 } 8290 d := v_1.AuxInt 8291 idx := v_1.Args[0] 8292 val := v.Args[2] 8293 mem := v.Args[3] 8294 v.reset(OpAMD64MOVSDstoreidx8) 8295 v.AuxInt = c + 8*d 8296 v.Aux = sym 8297 v.AddArg(ptr) 8298 v.AddArg(idx) 8299 v.AddArg(val) 8300 v.AddArg(mem) 8301 return true 8302 } 8303 return false 8304 } 8305 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool { 8306 b := v.Block 8307 _ = b 8308 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 8309 // cond: is32Bit(off1+off2) 8310 // result: (MOVSSload [off1+off2] {sym} ptr mem) 8311 for { 8312 off1 := v.AuxInt 8313 sym := v.Aux 8314 v_0 := v.Args[0] 8315 if v_0.Op != OpAMD64ADDQconst { 8316 break 8317 } 8318 off2 := v_0.AuxInt 8319 ptr := v_0.Args[0] 8320 mem := v.Args[1] 8321 if !(is32Bit(off1 + off2)) { 8322 break 8323 } 8324 v.reset(OpAMD64MOVSSload) 8325 v.AuxInt = off1 + off2 8326 v.Aux = sym 8327 v.AddArg(ptr) 8328 v.AddArg(mem) 8329 return true 8330 } 8331 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8332 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8333 // result: (MOVSSload [off1+off2] 
{mergeSym(sym1,sym2)} base mem) 8334 for { 8335 off1 := v.AuxInt 8336 sym1 := v.Aux 8337 v_0 := v.Args[0] 8338 if v_0.Op != OpAMD64LEAQ { 8339 break 8340 } 8341 off2 := v_0.AuxInt 8342 sym2 := v_0.Aux 8343 base := v_0.Args[0] 8344 mem := v.Args[1] 8345 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8346 break 8347 } 8348 v.reset(OpAMD64MOVSSload) 8349 v.AuxInt = off1 + off2 8350 v.Aux = mergeSym(sym1, sym2) 8351 v.AddArg(base) 8352 v.AddArg(mem) 8353 return true 8354 } 8355 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8356 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8357 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8358 for { 8359 off1 := v.AuxInt 8360 sym1 := v.Aux 8361 v_0 := v.Args[0] 8362 if v_0.Op != OpAMD64LEAQ1 { 8363 break 8364 } 8365 off2 := v_0.AuxInt 8366 sym2 := v_0.Aux 8367 ptr := v_0.Args[0] 8368 idx := v_0.Args[1] 8369 mem := v.Args[1] 8370 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8371 break 8372 } 8373 v.reset(OpAMD64MOVSSloadidx1) 8374 v.AuxInt = off1 + off2 8375 v.Aux = mergeSym(sym1, sym2) 8376 v.AddArg(ptr) 8377 v.AddArg(idx) 8378 v.AddArg(mem) 8379 return true 8380 } 8381 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 8382 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8383 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8384 for { 8385 off1 := v.AuxInt 8386 sym1 := v.Aux 8387 v_0 := v.Args[0] 8388 if v_0.Op != OpAMD64LEAQ4 { 8389 break 8390 } 8391 off2 := v_0.AuxInt 8392 sym2 := v_0.Aux 8393 ptr := v_0.Args[0] 8394 idx := v_0.Args[1] 8395 mem := v.Args[1] 8396 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8397 break 8398 } 8399 v.reset(OpAMD64MOVSSloadidx4) 8400 v.AuxInt = off1 + off2 8401 v.Aux = mergeSym(sym1, sym2) 8402 v.AddArg(ptr) 8403 v.AddArg(idx) 8404 v.AddArg(mem) 8405 return true 8406 } 8407 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 8408 // cond: ptr.Op != OpSB 8409 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 8410 for { 8411 off := v.AuxInt 8412 sym := v.Aux 8413 v_0 := v.Args[0] 8414 if v_0.Op != OpAMD64ADDQ { 8415 break 8416 } 8417 ptr := v_0.Args[0] 8418 idx := v_0.Args[1] 8419 mem := v.Args[1] 8420 if !(ptr.Op != OpSB) { 8421 break 8422 } 8423 v.reset(OpAMD64MOVSSloadidx1) 8424 v.AuxInt = off 8425 v.Aux = sym 8426 v.AddArg(ptr) 8427 v.AddArg(idx) 8428 v.AddArg(mem) 8429 return true 8430 } 8431 return false 8432 } 8433 func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool { 8434 b := v.Block 8435 _ = b 8436 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 8437 // cond: 8438 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 8439 for { 8440 c := v.AuxInt 8441 sym := v.Aux 8442 ptr := v.Args[0] 8443 v_1 := v.Args[1] 8444 if v_1.Op != OpAMD64SHLQconst { 8445 break 8446 } 8447 if v_1.AuxInt != 2 { 8448 break 8449 } 8450 idx := v_1.Args[0] 8451 mem := v.Args[2] 8452 v.reset(OpAMD64MOVSSloadidx4) 8453 v.AuxInt = c 8454 v.Aux = sym 8455 v.AddArg(ptr) 8456 v.AddArg(idx) 8457 v.AddArg(mem) 8458 return true 8459 } 8460 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8461 // cond: 8462 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 8463 for { 8464 c := v.AuxInt 8465 sym := v.Aux 8466 v_0 := v.Args[0] 8467 if v_0.Op != OpAMD64ADDQconst { 8468 break 8469 } 8470 d := v_0.AuxInt 8471 ptr := v_0.Args[0] 8472 idx := v.Args[1] 8473 mem := v.Args[2] 8474 v.reset(OpAMD64MOVSSloadidx1) 8475 v.AuxInt = c + d 8476 v.Aux = sym 8477 v.AddArg(ptr) 8478 
v.AddArg(idx) 8479 v.AddArg(mem) 8480 return true 8481 } 8482 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8483 // cond: 8484 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 8485 for { 8486 c := v.AuxInt 8487 sym := v.Aux 8488 ptr := v.Args[0] 8489 v_1 := v.Args[1] 8490 if v_1.Op != OpAMD64ADDQconst { 8491 break 8492 } 8493 d := v_1.AuxInt 8494 idx := v_1.Args[0] 8495 mem := v.Args[2] 8496 v.reset(OpAMD64MOVSSloadidx1) 8497 v.AuxInt = c + d 8498 v.Aux = sym 8499 v.AddArg(ptr) 8500 v.AddArg(idx) 8501 v.AddArg(mem) 8502 return true 8503 } 8504 return false 8505 } 8506 func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool { 8507 b := v.Block 8508 _ = b 8509 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 8510 // cond: 8511 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 8512 for { 8513 c := v.AuxInt 8514 sym := v.Aux 8515 v_0 := v.Args[0] 8516 if v_0.Op != OpAMD64ADDQconst { 8517 break 8518 } 8519 d := v_0.AuxInt 8520 ptr := v_0.Args[0] 8521 idx := v.Args[1] 8522 mem := v.Args[2] 8523 v.reset(OpAMD64MOVSSloadidx4) 8524 v.AuxInt = c + d 8525 v.Aux = sym 8526 v.AddArg(ptr) 8527 v.AddArg(idx) 8528 v.AddArg(mem) 8529 return true 8530 } 8531 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 8532 // cond: 8533 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 8534 for { 8535 c := v.AuxInt 8536 sym := v.Aux 8537 ptr := v.Args[0] 8538 v_1 := v.Args[1] 8539 if v_1.Op != OpAMD64ADDQconst { 8540 break 8541 } 8542 d := v_1.AuxInt 8543 idx := v_1.Args[0] 8544 mem := v.Args[2] 8545 v.reset(OpAMD64MOVSSloadidx4) 8546 v.AuxInt = c + 4*d 8547 v.Aux = sym 8548 v.AddArg(ptr) 8549 v.AddArg(idx) 8550 v.AddArg(mem) 8551 return true 8552 } 8553 return false 8554 } 8555 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool { 8556 b := v.Block 8557 _ = b 8558 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8559 // cond: is32Bit(off1+off2) 8560 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 8561 for { 8562 off1 := v.AuxInt 8563 sym := v.Aux 8564 v_0 := v.Args[0] 8565 if v_0.Op != OpAMD64ADDQconst { 8566 break 8567 } 8568 off2 := v_0.AuxInt 8569 ptr := v_0.Args[0] 8570 val := v.Args[1] 8571 mem := v.Args[2] 8572 if !(is32Bit(off1 + off2)) { 8573 break 8574 } 8575 v.reset(OpAMD64MOVSSstore) 8576 v.AuxInt = off1 + off2 8577 v.Aux = sym 8578 v.AddArg(ptr) 8579 v.AddArg(val) 8580 v.AddArg(mem) 8581 return true 8582 } 8583 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8584 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8585 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8586 for { 8587 off1 := v.AuxInt 8588 sym1 := v.Aux 8589 v_0 := v.Args[0] 8590 if v_0.Op != OpAMD64LEAQ { 8591 break 8592 } 8593 off2 := v_0.AuxInt 8594 sym2 := v_0.Aux 8595 base := v_0.Args[0] 8596 val := v.Args[1] 8597 mem := v.Args[2] 8598 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8599 break 8600 } 8601 v.reset(OpAMD64MOVSSstore) 8602 v.AuxInt = off1 + off2 8603 v.Aux = mergeSym(sym1, sym2) 8604 v.AddArg(base) 8605 v.AddArg(val) 8606 v.AddArg(mem) 8607 return true 8608 } 8609 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8610 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8611 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8612 for { 8613 off1 := v.AuxInt 8614 sym1 := v.Aux 8615 v_0 := v.Args[0] 8616 if v_0.Op != OpAMD64LEAQ1 { 8617 break 8618 } 8619 off2 := v_0.AuxInt 8620 sym2 := v_0.Aux 8621 ptr 
:= v_0.Args[0] 8622 idx := v_0.Args[1] 8623 val := v.Args[1] 8624 mem := v.Args[2] 8625 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8626 break 8627 } 8628 v.reset(OpAMD64MOVSSstoreidx1) 8629 v.AuxInt = off1 + off2 8630 v.Aux = mergeSym(sym1, sym2) 8631 v.AddArg(ptr) 8632 v.AddArg(idx) 8633 v.AddArg(val) 8634 v.AddArg(mem) 8635 return true 8636 } 8637 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 8638 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8639 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8640 for { 8641 off1 := v.AuxInt 8642 sym1 := v.Aux 8643 v_0 := v.Args[0] 8644 if v_0.Op != OpAMD64LEAQ4 { 8645 break 8646 } 8647 off2 := v_0.AuxInt 8648 sym2 := v_0.Aux 8649 ptr := v_0.Args[0] 8650 idx := v_0.Args[1] 8651 val := v.Args[1] 8652 mem := v.Args[2] 8653 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8654 break 8655 } 8656 v.reset(OpAMD64MOVSSstoreidx4) 8657 v.AuxInt = off1 + off2 8658 v.Aux = mergeSym(sym1, sym2) 8659 v.AddArg(ptr) 8660 v.AddArg(idx) 8661 v.AddArg(val) 8662 v.AddArg(mem) 8663 return true 8664 } 8665 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 8666 // cond: ptr.Op != OpSB 8667 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 8668 for { 8669 off := v.AuxInt 8670 sym := v.Aux 8671 v_0 := v.Args[0] 8672 if v_0.Op != OpAMD64ADDQ { 8673 break 8674 } 8675 ptr := v_0.Args[0] 8676 idx := v_0.Args[1] 8677 val := v.Args[1] 8678 mem := v.Args[2] 8679 if !(ptr.Op != OpSB) { 8680 break 8681 } 8682 v.reset(OpAMD64MOVSSstoreidx1) 8683 v.AuxInt = off 8684 v.Aux = sym 8685 v.AddArg(ptr) 8686 v.AddArg(idx) 8687 v.AddArg(val) 8688 v.AddArg(mem) 8689 return true 8690 } 8691 return false 8692 } 8693 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool { 8694 b := v.Block 8695 _ = b 8696 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 8697 // cond: 8698 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 8699 for { 8700 c := v.AuxInt 8701 sym := v.Aux 8702 ptr := v.Args[0] 8703 v_1 := v.Args[1] 8704 if v_1.Op != OpAMD64SHLQconst { 8705 break 8706 } 8707 if v_1.AuxInt != 2 { 8708 break 8709 } 8710 idx := v_1.Args[0] 8711 val := v.Args[2] 8712 mem := v.Args[3] 8713 v.reset(OpAMD64MOVSSstoreidx4) 8714 v.AuxInt = c 8715 v.Aux = sym 8716 v.AddArg(ptr) 8717 v.AddArg(idx) 8718 v.AddArg(val) 8719 v.AddArg(mem) 8720 return true 8721 } 8722 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8723 // cond: 8724 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 8725 for { 8726 c := v.AuxInt 8727 sym := v.Aux 8728 v_0 := v.Args[0] 8729 if v_0.Op != OpAMD64ADDQconst { 8730 break 8731 } 8732 d := v_0.AuxInt 8733 ptr := v_0.Args[0] 8734 idx := v.Args[1] 8735 val := v.Args[2] 8736 mem := v.Args[3] 8737 v.reset(OpAMD64MOVSSstoreidx1) 8738 v.AuxInt = c + d 8739 v.Aux = sym 8740 v.AddArg(ptr) 8741 v.AddArg(idx) 8742 v.AddArg(val) 8743 v.AddArg(mem) 8744 return true 8745 } 8746 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8747 // cond: 8748 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 8749 for { 8750 c := v.AuxInt 8751 sym := v.Aux 8752 ptr := v.Args[0] 8753 v_1 := v.Args[1] 8754 if v_1.Op != OpAMD64ADDQconst { 8755 break 8756 } 8757 d := v_1.AuxInt 8758 idx := v_1.Args[0] 8759 val := v.Args[2] 8760 mem := v.Args[3] 8761 v.reset(OpAMD64MOVSSstoreidx1) 8762 v.AuxInt = c + d 8763 v.Aux = sym 8764 v.AddArg(ptr) 8765 v.AddArg(idx) 8766 v.AddArg(val) 8767 v.AddArg(mem) 8768 return true 8769 } 8770 return 
false 8771 } 8772 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool { 8773 b := v.Block 8774 _ = b 8775 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8776 // cond: 8777 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 8778 for { 8779 c := v.AuxInt 8780 sym := v.Aux 8781 v_0 := v.Args[0] 8782 if v_0.Op != OpAMD64ADDQconst { 8783 break 8784 } 8785 d := v_0.AuxInt 8786 ptr := v_0.Args[0] 8787 idx := v.Args[1] 8788 val := v.Args[2] 8789 mem := v.Args[3] 8790 v.reset(OpAMD64MOVSSstoreidx4) 8791 v.AuxInt = c + d 8792 v.Aux = sym 8793 v.AddArg(ptr) 8794 v.AddArg(idx) 8795 v.AddArg(val) 8796 v.AddArg(mem) 8797 return true 8798 } 8799 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8800 // cond: 8801 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 8802 for { 8803 c := v.AuxInt 8804 sym := v.Aux 8805 ptr := v.Args[0] 8806 v_1 := v.Args[1] 8807 if v_1.Op != OpAMD64ADDQconst { 8808 break 8809 } 8810 d := v_1.AuxInt 8811 idx := v_1.Args[0] 8812 val := v.Args[2] 8813 mem := v.Args[3] 8814 v.reset(OpAMD64MOVSSstoreidx4) 8815 v.AuxInt = c + 4*d 8816 v.Aux = sym 8817 v.AddArg(ptr) 8818 v.AddArg(idx) 8819 v.AddArg(val) 8820 v.AddArg(mem) 8821 return true 8822 } 8823 return false 8824 } 8825 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { 8826 b := v.Block 8827 _ = b 8828 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 8829 // cond: x.Uses == 1 && clobber(x) 8830 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8831 for { 8832 x := v.Args[0] 8833 if x.Op != OpAMD64MOVWload { 8834 break 8835 } 8836 off := x.AuxInt 8837 sym := x.Aux 8838 ptr := x.Args[0] 8839 mem := x.Args[1] 8840 if !(x.Uses == 1 && clobber(x)) { 8841 break 8842 } 8843 b = x.Block 8844 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 8845 v.reset(OpCopy) 8846 v.AddArg(v0) 8847 v0.AuxInt = off 8848 v0.Aux = sym 8849 v0.AddArg(ptr) 8850 v0.AddArg(mem) 8851 return true 8852 } 8853 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 8854 // cond: x.Uses == 1 && clobber(x) 8855 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8856 for { 8857 x := v.Args[0] 8858 if x.Op != OpAMD64MOVLload { 8859 break 8860 } 8861 off := x.AuxInt 8862 sym := x.Aux 8863 ptr := x.Args[0] 8864 mem := x.Args[1] 8865 if !(x.Uses == 1 && clobber(x)) { 8866 break 8867 } 8868 b = x.Block 8869 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 8870 v.reset(OpCopy) 8871 v.AddArg(v0) 8872 v0.AuxInt = off 8873 v0.Aux = sym 8874 v0.AddArg(ptr) 8875 v0.AddArg(mem) 8876 return true 8877 } 8878 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 8879 // cond: x.Uses == 1 && clobber(x) 8880 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 8881 for { 8882 x := v.Args[0] 8883 if x.Op != OpAMD64MOVQload { 8884 break 8885 } 8886 off := x.AuxInt 8887 sym := x.Aux 8888 ptr := x.Args[0] 8889 mem := x.Args[1] 8890 if !(x.Uses == 1 && clobber(x)) { 8891 break 8892 } 8893 b = x.Block 8894 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 8895 v.reset(OpCopy) 8896 v.AddArg(v0) 8897 v0.AuxInt = off 8898 v0.Aux = sym 8899 v0.AddArg(ptr) 8900 v0.AddArg(mem) 8901 return true 8902 } 8903 // match: (MOVWQSX (ANDLconst [c] x)) 8904 // cond: c & 0x8000 == 0 8905 // result: (ANDLconst [c & 0x7fff] x) 8906 for { 8907 v_0 := v.Args[0] 8908 if v_0.Op != OpAMD64ANDLconst { 8909 break 8910 } 8911 c := v_0.AuxInt 8912 x := v_0.Args[0] 8913 if !(c&0x8000 == 0) { 8914 break 8915 } 8916 v.reset(OpAMD64ANDLconst) 8917 v.AuxInt = c & 0x7fff 8918 
v.AddArg(x) 8919 return true 8920 } 8921 return false 8922 } 8923 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool { 8924 b := v.Block 8925 _ = b 8926 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8927 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8928 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8929 for { 8930 off1 := v.AuxInt 8931 sym1 := v.Aux 8932 v_0 := v.Args[0] 8933 if v_0.Op != OpAMD64LEAQ { 8934 break 8935 } 8936 off2 := v_0.AuxInt 8937 sym2 := v_0.Aux 8938 base := v_0.Args[0] 8939 mem := v.Args[1] 8940 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8941 break 8942 } 8943 v.reset(OpAMD64MOVWQSXload) 8944 v.AuxInt = off1 + off2 8945 v.Aux = mergeSym(sym1, sym2) 8946 v.AddArg(base) 8947 v.AddArg(mem) 8948 return true 8949 } 8950 return false 8951 } 8952 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { 8953 b := v.Block 8954 _ = b 8955 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 8956 // cond: x.Uses == 1 && clobber(x) 8957 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 8958 for { 8959 x := v.Args[0] 8960 if x.Op != OpAMD64MOVWload { 8961 break 8962 } 8963 off := x.AuxInt 8964 sym := x.Aux 8965 ptr := x.Args[0] 8966 mem := x.Args[1] 8967 if !(x.Uses == 1 && clobber(x)) { 8968 break 8969 } 8970 b = x.Block 8971 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 8972 v.reset(OpCopy) 8973 v.AddArg(v0) 8974 v0.AuxInt = off 8975 v0.Aux = sym 8976 v0.AddArg(ptr) 8977 v0.AddArg(mem) 8978 return true 8979 } 8980 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 8981 // cond: x.Uses == 1 && clobber(x) 8982 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 8983 for { 8984 x := v.Args[0] 8985 if x.Op != OpAMD64MOVLload { 8986 break 8987 } 8988 off := x.AuxInt 8989 sym := x.Aux 8990 ptr := x.Args[0] 8991 mem := x.Args[1] 8992 if !(x.Uses == 1 && clobber(x)) { 8993 break 8994 } 8995 b = x.Block 8996 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 8997 v.reset(OpCopy) 8998 v.AddArg(v0) 8999 v0.AuxInt = off 9000 v0.Aux = sym 9001 v0.AddArg(ptr) 9002 v0.AddArg(mem) 9003 return true 9004 } 9005 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 9006 // cond: x.Uses == 1 && clobber(x) 9007 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 9008 for { 9009 x := v.Args[0] 9010 if x.Op != OpAMD64MOVQload { 9011 break 9012 } 9013 off := x.AuxInt 9014 sym := x.Aux 9015 ptr := x.Args[0] 9016 mem := x.Args[1] 9017 if !(x.Uses == 1 && clobber(x)) { 9018 break 9019 } 9020 b = x.Block 9021 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 9022 v.reset(OpCopy) 9023 v.AddArg(v0) 9024 v0.AuxInt = off 9025 v0.Aux = sym 9026 v0.AddArg(ptr) 9027 v0.AddArg(mem) 9028 return true 9029 } 9030 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 9031 // cond: x.Uses == 1 && clobber(x) 9032 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 9033 for { 9034 x := v.Args[0] 9035 if x.Op != OpAMD64MOVWloadidx1 { 9036 break 9037 } 9038 off := x.AuxInt 9039 sym := x.Aux 9040 ptr := x.Args[0] 9041 idx := x.Args[1] 9042 mem := x.Args[2] 9043 if !(x.Uses == 1 && clobber(x)) { 9044 break 9045 } 9046 b = x.Block 9047 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 9048 v.reset(OpCopy) 9049 v.AddArg(v0) 9050 v0.AuxInt = off 9051 v0.Aux = sym 9052 v0.AddArg(ptr) 9053 v0.AddArg(idx) 9054 v0.AddArg(mem) 9055 return true 9056 } 9057 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 9058 // cond: x.Uses == 1 && clobber(x) 9059 // result: @x.Block 
(MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 9060 for { 9061 x := v.Args[0] 9062 if x.Op != OpAMD64MOVWloadidx2 { 9063 break 9064 } 9065 off := x.AuxInt 9066 sym := x.Aux 9067 ptr := x.Args[0] 9068 idx := x.Args[1] 9069 mem := x.Args[2] 9070 if !(x.Uses == 1 && clobber(x)) { 9071 break 9072 } 9073 b = x.Block 9074 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 9075 v.reset(OpCopy) 9076 v.AddArg(v0) 9077 v0.AuxInt = off 9078 v0.Aux = sym 9079 v0.AddArg(ptr) 9080 v0.AddArg(idx) 9081 v0.AddArg(mem) 9082 return true 9083 } 9084 // match: (MOVWQZX (ANDLconst [c] x)) 9085 // cond: 9086 // result: (ANDLconst [c & 0xffff] x) 9087 for { 9088 v_0 := v.Args[0] 9089 if v_0.Op != OpAMD64ANDLconst { 9090 break 9091 } 9092 c := v_0.AuxInt 9093 x := v_0.Args[0] 9094 v.reset(OpAMD64ANDLconst) 9095 v.AuxInt = c & 0xffff 9096 v.AddArg(x) 9097 return true 9098 } 9099 return false 9100 } 9101 func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool { 9102 b := v.Block 9103 _ = b 9104 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 9105 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 9106 // result: x 9107 for { 9108 off := v.AuxInt 9109 sym := v.Aux 9110 ptr := v.Args[0] 9111 v_1 := v.Args[1] 9112 if v_1.Op != OpAMD64MOVWstore { 9113 break 9114 } 9115 off2 := v_1.AuxInt 9116 sym2 := v_1.Aux 9117 ptr2 := v_1.Args[0] 9118 x := v_1.Args[1] 9119 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 9120 break 9121 } 9122 v.reset(OpCopy) 9123 v.Type = x.Type 9124 v.AddArg(x) 9125 return true 9126 } 9127 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 9128 // cond: is32Bit(off1+off2) 9129 // result: (MOVWload [off1+off2] {sym} ptr mem) 9130 for { 9131 off1 := v.AuxInt 9132 sym := v.Aux 9133 v_0 := v.Args[0] 9134 if v_0.Op != OpAMD64ADDQconst { 9135 break 9136 } 9137 off2 := v_0.AuxInt 9138 ptr := v_0.Args[0] 9139 mem := v.Args[1] 9140 if !(is32Bit(off1 + off2)) { 9141 break 9142 } 9143 v.reset(OpAMD64MOVWload) 9144 v.AuxInt = off1 + off2 9145 v.Aux = sym 9146 v.AddArg(ptr) 9147 v.AddArg(mem) 9148 return true 9149 } 9150 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9151 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9152 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9153 for { 9154 off1 := v.AuxInt 9155 sym1 := v.Aux 9156 v_0 := v.Args[0] 9157 if v_0.Op != OpAMD64LEAQ { 9158 break 9159 } 9160 off2 := v_0.AuxInt 9161 sym2 := v_0.Aux 9162 base := v_0.Args[0] 9163 mem := v.Args[1] 9164 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9165 break 9166 } 9167 v.reset(OpAMD64MOVWload) 9168 v.AuxInt = off1 + off2 9169 v.Aux = mergeSym(sym1, sym2) 9170 v.AddArg(base) 9171 v.AddArg(mem) 9172 return true 9173 } 9174 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9175 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9176 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9177 for { 9178 off1 := v.AuxInt 9179 sym1 := v.Aux 9180 v_0 := v.Args[0] 9181 if v_0.Op != OpAMD64LEAQ1 { 9182 break 9183 } 9184 off2 := v_0.AuxInt 9185 sym2 := v_0.Aux 9186 ptr := v_0.Args[0] 9187 idx := v_0.Args[1] 9188 mem := v.Args[1] 9189 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9190 break 9191 } 9192 v.reset(OpAMD64MOVWloadidx1) 9193 v.AuxInt = off1 + off2 9194 v.Aux = mergeSym(sym1, sym2) 9195 v.AddArg(ptr) 9196 v.AddArg(idx) 9197 v.AddArg(mem) 9198 return true 9199 } 9200 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 9201 // cond: 
is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9202 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9203 for { 9204 off1 := v.AuxInt 9205 sym1 := v.Aux 9206 v_0 := v.Args[0] 9207 if v_0.Op != OpAMD64LEAQ2 { 9208 break 9209 } 9210 off2 := v_0.AuxInt 9211 sym2 := v_0.Aux 9212 ptr := v_0.Args[0] 9213 idx := v_0.Args[1] 9214 mem := v.Args[1] 9215 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9216 break 9217 } 9218 v.reset(OpAMD64MOVWloadidx2) 9219 v.AuxInt = off1 + off2 9220 v.Aux = mergeSym(sym1, sym2) 9221 v.AddArg(ptr) 9222 v.AddArg(idx) 9223 v.AddArg(mem) 9224 return true 9225 } 9226 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 9227 // cond: ptr.Op != OpSB 9228 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 9229 for { 9230 off := v.AuxInt 9231 sym := v.Aux 9232 v_0 := v.Args[0] 9233 if v_0.Op != OpAMD64ADDQ { 9234 break 9235 } 9236 ptr := v_0.Args[0] 9237 idx := v_0.Args[1] 9238 mem := v.Args[1] 9239 if !(ptr.Op != OpSB) { 9240 break 9241 } 9242 v.reset(OpAMD64MOVWloadidx1) 9243 v.AuxInt = off 9244 v.Aux = sym 9245 v.AddArg(ptr) 9246 v.AddArg(idx) 9247 v.AddArg(mem) 9248 return true 9249 } 9250 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 9251 // cond: canMergeSym(sym1, sym2) 9252 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9253 for { 9254 off1 := v.AuxInt 9255 sym1 := v.Aux 9256 v_0 := v.Args[0] 9257 if v_0.Op != OpAMD64LEAL { 9258 break 9259 } 9260 off2 := v_0.AuxInt 9261 sym2 := v_0.Aux 9262 base := v_0.Args[0] 9263 mem := v.Args[1] 9264 if !(canMergeSym(sym1, sym2)) { 9265 break 9266 } 9267 v.reset(OpAMD64MOVWload) 9268 v.AuxInt = off1 + off2 9269 v.Aux = mergeSym(sym1, sym2) 9270 v.AddArg(base) 9271 v.AddArg(mem) 9272 return true 9273 } 9274 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 9275 // cond: is32Bit(off1+off2) 9276 // result: (MOVWload [off1+off2] {sym} ptr mem) 9277 for { 9278 off1 := v.AuxInt 9279 sym := v.Aux 9280 v_0 := v.Args[0] 9281 if v_0.Op != OpAMD64ADDLconst { 9282 break 9283 } 9284 off2 := v_0.AuxInt 9285 ptr := v_0.Args[0] 9286 mem := v.Args[1] 9287 if !(is32Bit(off1 + off2)) { 9288 break 9289 } 9290 v.reset(OpAMD64MOVWload) 9291 v.AuxInt = off1 + off2 9292 v.Aux = sym 9293 v.AddArg(ptr) 9294 v.AddArg(mem) 9295 return true 9296 } 9297 return false 9298 } 9299 func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool { 9300 b := v.Block 9301 _ = b 9302 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 9303 // cond: 9304 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 9305 for { 9306 c := v.AuxInt 9307 sym := v.Aux 9308 ptr := v.Args[0] 9309 v_1 := v.Args[1] 9310 if v_1.Op != OpAMD64SHLQconst { 9311 break 9312 } 9313 if v_1.AuxInt != 1 { 9314 break 9315 } 9316 idx := v_1.Args[0] 9317 mem := v.Args[2] 9318 v.reset(OpAMD64MOVWloadidx2) 9319 v.AuxInt = c 9320 v.Aux = sym 9321 v.AddArg(ptr) 9322 v.AddArg(idx) 9323 v.AddArg(mem) 9324 return true 9325 } 9326 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9327 // cond: 9328 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 9329 for { 9330 c := v.AuxInt 9331 sym := v.Aux 9332 v_0 := v.Args[0] 9333 if v_0.Op != OpAMD64ADDQconst { 9334 break 9335 } 9336 d := v_0.AuxInt 9337 ptr := v_0.Args[0] 9338 idx := v.Args[1] 9339 mem := v.Args[2] 9340 v.reset(OpAMD64MOVWloadidx1) 9341 v.AuxInt = c + d 9342 v.Aux = sym 9343 v.AddArg(ptr) 9344 v.AddArg(idx) 9345 v.AddArg(mem) 9346 return true 9347 } 9348 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9349 // cond: 9350 // result: 
(MOVWloadidx1 [c+d] {sym} ptr idx mem) 9351 for { 9352 c := v.AuxInt 9353 sym := v.Aux 9354 ptr := v.Args[0] 9355 v_1 := v.Args[1] 9356 if v_1.Op != OpAMD64ADDQconst { 9357 break 9358 } 9359 d := v_1.AuxInt 9360 idx := v_1.Args[0] 9361 mem := v.Args[2] 9362 v.reset(OpAMD64MOVWloadidx1) 9363 v.AuxInt = c + d 9364 v.Aux = sym 9365 v.AddArg(ptr) 9366 v.AddArg(idx) 9367 v.AddArg(mem) 9368 return true 9369 } 9370 return false 9371 } 9372 func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool { 9373 b := v.Block 9374 _ = b 9375 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 9376 // cond: 9377 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 9378 for { 9379 c := v.AuxInt 9380 sym := v.Aux 9381 v_0 := v.Args[0] 9382 if v_0.Op != OpAMD64ADDQconst { 9383 break 9384 } 9385 d := v_0.AuxInt 9386 ptr := v_0.Args[0] 9387 idx := v.Args[1] 9388 mem := v.Args[2] 9389 v.reset(OpAMD64MOVWloadidx2) 9390 v.AuxInt = c + d 9391 v.Aux = sym 9392 v.AddArg(ptr) 9393 v.AddArg(idx) 9394 v.AddArg(mem) 9395 return true 9396 } 9397 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 9398 // cond: 9399 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 9400 for { 9401 c := v.AuxInt 9402 sym := v.Aux 9403 ptr := v.Args[0] 9404 v_1 := v.Args[1] 9405 if v_1.Op != OpAMD64ADDQconst { 9406 break 9407 } 9408 d := v_1.AuxInt 9409 idx := v_1.Args[0] 9410 mem := v.Args[2] 9411 v.reset(OpAMD64MOVWloadidx2) 9412 v.AuxInt = c + 2*d 9413 v.Aux = sym 9414 v.AddArg(ptr) 9415 v.AddArg(idx) 9416 v.AddArg(mem) 9417 return true 9418 } 9419 return false 9420 } 9421 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { 9422 b := v.Block 9423 _ = b 9424 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 9425 // cond: 9426 // result: (MOVWstore [off] {sym} ptr x mem) 9427 for { 9428 off := v.AuxInt 9429 sym := v.Aux 9430 ptr := v.Args[0] 9431 v_1 := v.Args[1] 9432 if v_1.Op != OpAMD64MOVWQSX { 9433 break 9434 } 9435 x := v_1.Args[0] 9436 mem := v.Args[2] 9437 v.reset(OpAMD64MOVWstore) 9438 v.AuxInt = off 9439 v.Aux = sym 9440 v.AddArg(ptr) 9441 v.AddArg(x) 9442 v.AddArg(mem) 9443 return true 9444 } 9445 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 9446 // cond: 9447 // result: (MOVWstore [off] {sym} ptr x mem) 9448 for { 9449 off := v.AuxInt 9450 sym := v.Aux 9451 ptr := v.Args[0] 9452 v_1 := v.Args[1] 9453 if v_1.Op != OpAMD64MOVWQZX { 9454 break 9455 } 9456 x := v_1.Args[0] 9457 mem := v.Args[2] 9458 v.reset(OpAMD64MOVWstore) 9459 v.AuxInt = off 9460 v.Aux = sym 9461 v.AddArg(ptr) 9462 v.AddArg(x) 9463 v.AddArg(mem) 9464 return true 9465 } 9466 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9467 // cond: is32Bit(off1+off2) 9468 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 9469 for { 9470 off1 := v.AuxInt 9471 sym := v.Aux 9472 v_0 := v.Args[0] 9473 if v_0.Op != OpAMD64ADDQconst { 9474 break 9475 } 9476 off2 := v_0.AuxInt 9477 ptr := v_0.Args[0] 9478 val := v.Args[1] 9479 mem := v.Args[2] 9480 if !(is32Bit(off1 + off2)) { 9481 break 9482 } 9483 v.reset(OpAMD64MOVWstore) 9484 v.AuxInt = off1 + off2 9485 v.Aux = sym 9486 v.AddArg(ptr) 9487 v.AddArg(val) 9488 v.AddArg(mem) 9489 return true 9490 } 9491 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 9492 // cond: validOff(off) 9493 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 9494 for { 9495 off := v.AuxInt 9496 sym := v.Aux 9497 ptr := v.Args[0] 9498 v_1 := v.Args[1] 9499 if v_1.Op != OpAMD64MOVLconst { 9500 break 9501 } 9502 c := v_1.AuxInt 9503 mem 
:= v.Args[2] 9504 if !(validOff(off)) { 9505 break 9506 } 9507 v.reset(OpAMD64MOVWstoreconst) 9508 v.AuxInt = makeValAndOff(int64(int16(c)), off) 9509 v.Aux = sym 9510 v.AddArg(ptr) 9511 v.AddArg(mem) 9512 return true 9513 } 9514 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9515 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9516 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9517 for { 9518 off1 := v.AuxInt 9519 sym1 := v.Aux 9520 v_0 := v.Args[0] 9521 if v_0.Op != OpAMD64LEAQ { 9522 break 9523 } 9524 off2 := v_0.AuxInt 9525 sym2 := v_0.Aux 9526 base := v_0.Args[0] 9527 val := v.Args[1] 9528 mem := v.Args[2] 9529 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9530 break 9531 } 9532 v.reset(OpAMD64MOVWstore) 9533 v.AuxInt = off1 + off2 9534 v.Aux = mergeSym(sym1, sym2) 9535 v.AddArg(base) 9536 v.AddArg(val) 9537 v.AddArg(mem) 9538 return true 9539 } 9540 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9541 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9542 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9543 for { 9544 off1 := v.AuxInt 9545 sym1 := v.Aux 9546 v_0 := v.Args[0] 9547 if v_0.Op != OpAMD64LEAQ1 { 9548 break 9549 } 9550 off2 := v_0.AuxInt 9551 sym2 := v_0.Aux 9552 ptr := v_0.Args[0] 9553 idx := v_0.Args[1] 9554 val := v.Args[1] 9555 mem := v.Args[2] 9556 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9557 break 9558 } 9559 v.reset(OpAMD64MOVWstoreidx1) 9560 v.AuxInt = off1 + off2 9561 v.Aux = mergeSym(sym1, sym2) 9562 v.AddArg(ptr) 9563 v.AddArg(idx) 9564 v.AddArg(val) 9565 v.AddArg(mem) 9566 return true 9567 } 9568 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 9569 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9570 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9571 for { 9572 off1 := v.AuxInt 9573 sym1 := v.Aux 9574 v_0 := v.Args[0] 9575 if v_0.Op != OpAMD64LEAQ2 { 9576 break 9577 } 9578 off2 := v_0.AuxInt 9579 sym2 := v_0.Aux 9580 ptr := v_0.Args[0] 9581 idx := v_0.Args[1] 9582 val := v.Args[1] 9583 mem := v.Args[2] 9584 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9585 break 9586 } 9587 v.reset(OpAMD64MOVWstoreidx2) 9588 v.AuxInt = off1 + off2 9589 v.Aux = mergeSym(sym1, sym2) 9590 v.AddArg(ptr) 9591 v.AddArg(idx) 9592 v.AddArg(val) 9593 v.AddArg(mem) 9594 return true 9595 } 9596 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 9597 // cond: ptr.Op != OpSB 9598 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 9599 for { 9600 off := v.AuxInt 9601 sym := v.Aux 9602 v_0 := v.Args[0] 9603 if v_0.Op != OpAMD64ADDQ { 9604 break 9605 } 9606 ptr := v_0.Args[0] 9607 idx := v_0.Args[1] 9608 val := v.Args[1] 9609 mem := v.Args[2] 9610 if !(ptr.Op != OpSB) { 9611 break 9612 } 9613 v.reset(OpAMD64MOVWstoreidx1) 9614 v.AuxInt = off 9615 v.Aux = sym 9616 v.AddArg(ptr) 9617 v.AddArg(idx) 9618 v.AddArg(val) 9619 v.AddArg(mem) 9620 return true 9621 } 9622 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 9623 // cond: x.Uses == 1 && clobber(x) 9624 // result: (MOVLstore [i-2] {s} p w mem) 9625 for { 9626 i := v.AuxInt 9627 s := v.Aux 9628 p := v.Args[0] 9629 v_1 := v.Args[1] 9630 if v_1.Op != OpAMD64SHRQconst { 9631 break 9632 } 9633 if v_1.AuxInt != 16 { 9634 break 9635 } 9636 w := v_1.Args[0] 9637 x := v.Args[2] 9638 if x.Op != OpAMD64MOVWstore { 9639 break 9640 } 9641 if x.AuxInt != i-2 { 9642 break 9643 } 9644 if x.Aux != s { 9645 break 
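// The checks that follow confirm that x writes the same value w through the same pointer p at offset i-2, so the two adjacent 16-bit stores (low and high halves of the low 32 bits of w) can be fused into one 32-bit MOVLstore.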
9646 } 9647 if p != x.Args[0] { 9648 break 9649 } 9650 if w != x.Args[1] { 9651 break 9652 } 9653 mem := x.Args[2] 9654 if !(x.Uses == 1 && clobber(x)) { 9655 break 9656 } 9657 v.reset(OpAMD64MOVLstore) 9658 v.AuxInt = i - 2 9659 v.Aux = s 9660 v.AddArg(p) 9661 v.AddArg(w) 9662 v.AddArg(mem) 9663 return true 9664 } 9665 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 9666 // cond: x.Uses == 1 && clobber(x) 9667 // result: (MOVLstore [i-2] {s} p w0 mem) 9668 for { 9669 i := v.AuxInt 9670 s := v.Aux 9671 p := v.Args[0] 9672 v_1 := v.Args[1] 9673 if v_1.Op != OpAMD64SHRQconst { 9674 break 9675 } 9676 j := v_1.AuxInt 9677 w := v_1.Args[0] 9678 x := v.Args[2] 9679 if x.Op != OpAMD64MOVWstore { 9680 break 9681 } 9682 if x.AuxInt != i-2 { 9683 break 9684 } 9685 if x.Aux != s { 9686 break 9687 } 9688 if p != x.Args[0] { 9689 break 9690 } 9691 w0 := x.Args[1] 9692 if w0.Op != OpAMD64SHRQconst { 9693 break 9694 } 9695 if w0.AuxInt != j-16 { 9696 break 9697 } 9698 if w != w0.Args[0] { 9699 break 9700 } 9701 mem := x.Args[2] 9702 if !(x.Uses == 1 && clobber(x)) { 9703 break 9704 } 9705 v.reset(OpAMD64MOVLstore) 9706 v.AuxInt = i - 2 9707 v.Aux = s 9708 v.AddArg(p) 9709 v.AddArg(w0) 9710 v.AddArg(mem) 9711 return true 9712 } 9713 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 9714 // cond: canMergeSym(sym1, sym2) 9715 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9716 for { 9717 off1 := v.AuxInt 9718 sym1 := v.Aux 9719 v_0 := v.Args[0] 9720 if v_0.Op != OpAMD64LEAL { 9721 break 9722 } 9723 off2 := v_0.AuxInt 9724 sym2 := v_0.Aux 9725 base := v_0.Args[0] 9726 val := v.Args[1] 9727 mem := v.Args[2] 9728 if !(canMergeSym(sym1, sym2)) { 9729 break 9730 } 9731 v.reset(OpAMD64MOVWstore) 9732 v.AuxInt = off1 + off2 9733 v.Aux = mergeSym(sym1, sym2) 9734 v.AddArg(base) 9735 v.AddArg(val) 9736 v.AddArg(mem) 9737 return true 9738 } 9739 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 9740 // cond: is32Bit(off1+off2) 9741 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 9742 for { 9743 off1 := v.AuxInt 9744 sym := v.Aux 9745 v_0 := v.Args[0] 9746 if v_0.Op != OpAMD64ADDLconst { 9747 break 9748 } 9749 off2 := v_0.AuxInt 9750 ptr := v_0.Args[0] 9751 val := v.Args[1] 9752 mem := v.Args[2] 9753 if !(is32Bit(off1 + off2)) { 9754 break 9755 } 9756 v.reset(OpAMD64MOVWstore) 9757 v.AuxInt = off1 + off2 9758 v.Aux = sym 9759 v.AddArg(ptr) 9760 v.AddArg(val) 9761 v.AddArg(mem) 9762 return true 9763 } 9764 return false 9765 } 9766 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { 9767 b := v.Block 9768 _ = b 9769 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 9770 // cond: ValAndOff(sc).canAdd(off) 9771 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9772 for { 9773 sc := v.AuxInt 9774 s := v.Aux 9775 v_0 := v.Args[0] 9776 if v_0.Op != OpAMD64ADDQconst { 9777 break 9778 } 9779 off := v_0.AuxInt 9780 ptr := v_0.Args[0] 9781 mem := v.Args[1] 9782 if !(ValAndOff(sc).canAdd(off)) { 9783 break 9784 } 9785 v.reset(OpAMD64MOVWstoreconst) 9786 v.AuxInt = ValAndOff(sc).add(off) 9787 v.Aux = s 9788 v.AddArg(ptr) 9789 v.AddArg(mem) 9790 return true 9791 } 9792 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 9793 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9794 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9795 for { 9796 sc := v.AuxInt 9797 sym1 := v.Aux 9798 v_0 := v.Args[0] 
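// A LEAQ feeding the address contributes both a displacement and a symbol: the displacements are combined with ValAndOff(sc).add(off) and the symbols with mergeSym, provided canMergeSym allows it.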
9799 if v_0.Op != OpAMD64LEAQ { 9800 break 9801 } 9802 off := v_0.AuxInt 9803 sym2 := v_0.Aux 9804 ptr := v_0.Args[0] 9805 mem := v.Args[1] 9806 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9807 break 9808 } 9809 v.reset(OpAMD64MOVWstoreconst) 9810 v.AuxInt = ValAndOff(sc).add(off) 9811 v.Aux = mergeSym(sym1, sym2) 9812 v.AddArg(ptr) 9813 v.AddArg(mem) 9814 return true 9815 } 9816 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 9817 // cond: canMergeSym(sym1, sym2) 9818 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9819 for { 9820 x := v.AuxInt 9821 sym1 := v.Aux 9822 v_0 := v.Args[0] 9823 if v_0.Op != OpAMD64LEAQ1 { 9824 break 9825 } 9826 off := v_0.AuxInt 9827 sym2 := v_0.Aux 9828 ptr := v_0.Args[0] 9829 idx := v_0.Args[1] 9830 mem := v.Args[1] 9831 if !(canMergeSym(sym1, sym2)) { 9832 break 9833 } 9834 v.reset(OpAMD64MOVWstoreconstidx1) 9835 v.AuxInt = ValAndOff(x).add(off) 9836 v.Aux = mergeSym(sym1, sym2) 9837 v.AddArg(ptr) 9838 v.AddArg(idx) 9839 v.AddArg(mem) 9840 return true 9841 } 9842 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 9843 // cond: canMergeSym(sym1, sym2) 9844 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9845 for { 9846 x := v.AuxInt 9847 sym1 := v.Aux 9848 v_0 := v.Args[0] 9849 if v_0.Op != OpAMD64LEAQ2 { 9850 break 9851 } 9852 off := v_0.AuxInt 9853 sym2 := v_0.Aux 9854 ptr := v_0.Args[0] 9855 idx := v_0.Args[1] 9856 mem := v.Args[1] 9857 if !(canMergeSym(sym1, sym2)) { 9858 break 9859 } 9860 v.reset(OpAMD64MOVWstoreconstidx2) 9861 v.AuxInt = ValAndOff(x).add(off) 9862 v.Aux = mergeSym(sym1, sym2) 9863 v.AddArg(ptr) 9864 v.AddArg(idx) 9865 v.AddArg(mem) 9866 return true 9867 } 9868 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 9869 // cond: 9870 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 9871 for { 9872 x := v.AuxInt 9873 sym := v.Aux 9874 v_0 := v.Args[0] 9875 if v_0.Op != OpAMD64ADDQ { 9876 break 9877 } 9878 ptr := v_0.Args[0] 9879 idx := v_0.Args[1] 9880 mem := v.Args[1] 9881 v.reset(OpAMD64MOVWstoreconstidx1) 9882 v.AuxInt = x 9883 v.Aux = sym 9884 v.AddArg(ptr) 9885 v.AddArg(idx) 9886 v.AddArg(mem) 9887 return true 9888 } 9889 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 9890 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 9891 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 9892 for { 9893 c := v.AuxInt 9894 s := v.Aux 9895 p := v.Args[0] 9896 x := v.Args[1] 9897 if x.Op != OpAMD64MOVWstoreconst { 9898 break 9899 } 9900 a := x.AuxInt 9901 if x.Aux != s { 9902 break 9903 } 9904 if p != x.Args[0] { 9905 break 9906 } 9907 mem := x.Args[1] 9908 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 9909 break 9910 } 9911 v.reset(OpAMD64MOVLstoreconst) 9912 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 9913 v.Aux = s 9914 v.AddArg(p) 9915 v.AddArg(mem) 9916 return true 9917 } 9918 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 9919 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9920 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9921 for { 9922 sc := v.AuxInt 9923 sym1 := v.Aux 9924 v_0 := v.Args[0] 9925 if v_0.Op != OpAMD64LEAL { 9926 break 9927 } 9928 off := v_0.AuxInt 9929 sym2 := v_0.Aux 9930 ptr := v_0.Args[0] 
9931 mem := v.Args[1] 9932 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9933 break 9934 } 9935 v.reset(OpAMD64MOVWstoreconst) 9936 v.AuxInt = ValAndOff(sc).add(off) 9937 v.Aux = mergeSym(sym1, sym2) 9938 v.AddArg(ptr) 9939 v.AddArg(mem) 9940 return true 9941 } 9942 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 9943 // cond: ValAndOff(sc).canAdd(off) 9944 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9945 for { 9946 sc := v.AuxInt 9947 s := v.Aux 9948 v_0 := v.Args[0] 9949 if v_0.Op != OpAMD64ADDLconst { 9950 break 9951 } 9952 off := v_0.AuxInt 9953 ptr := v_0.Args[0] 9954 mem := v.Args[1] 9955 if !(ValAndOff(sc).canAdd(off)) { 9956 break 9957 } 9958 v.reset(OpAMD64MOVWstoreconst) 9959 v.AuxInt = ValAndOff(sc).add(off) 9960 v.Aux = s 9961 v.AddArg(ptr) 9962 v.AddArg(mem) 9963 return true 9964 } 9965 return false 9966 } 9967 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool { 9968 b := v.Block 9969 _ = b 9970 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 9971 // cond: 9972 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 9973 for { 9974 c := v.AuxInt 9975 sym := v.Aux 9976 ptr := v.Args[0] 9977 v_1 := v.Args[1] 9978 if v_1.Op != OpAMD64SHLQconst { 9979 break 9980 } 9981 if v_1.AuxInt != 1 { 9982 break 9983 } 9984 idx := v_1.Args[0] 9985 mem := v.Args[2] 9986 v.reset(OpAMD64MOVWstoreconstidx2) 9987 v.AuxInt = c 9988 v.Aux = sym 9989 v.AddArg(ptr) 9990 v.AddArg(idx) 9991 v.AddArg(mem) 9992 return true 9993 } 9994 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 9995 // cond: 9996 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9997 for { 9998 x := v.AuxInt 9999 sym := v.Aux 10000 v_0 := v.Args[0] 10001 if v_0.Op != OpAMD64ADDQconst { 10002 break 10003 } 10004 c := v_0.AuxInt 10005 ptr := v_0.Args[0] 10006 idx := v.Args[1] 10007 mem := v.Args[2] 10008 v.reset(OpAMD64MOVWstoreconstidx1) 10009 v.AuxInt = ValAndOff(x).add(c) 10010 v.Aux = sym 10011 v.AddArg(ptr) 10012 v.AddArg(idx) 10013 v.AddArg(mem) 10014 return true 10015 } 10016 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 10017 // cond: 10018 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10019 for { 10020 x := v.AuxInt 10021 sym := v.Aux 10022 ptr := v.Args[0] 10023 v_1 := v.Args[1] 10024 if v_1.Op != OpAMD64ADDQconst { 10025 break 10026 } 10027 c := v_1.AuxInt 10028 idx := v_1.Args[0] 10029 mem := v.Args[2] 10030 v.reset(OpAMD64MOVWstoreconstidx1) 10031 v.AuxInt = ValAndOff(x).add(c) 10032 v.Aux = sym 10033 v.AddArg(ptr) 10034 v.AddArg(idx) 10035 v.AddArg(mem) 10036 return true 10037 } 10038 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 10039 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 10040 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 10041 for { 10042 c := v.AuxInt 10043 s := v.Aux 10044 p := v.Args[0] 10045 i := v.Args[1] 10046 x := v.Args[2] 10047 if x.Op != OpAMD64MOVWstoreconstidx1 { 10048 break 10049 } 10050 a := x.AuxInt 10051 if x.Aux != s { 10052 break 10053 } 10054 if p != x.Args[0] { 10055 break 10056 } 10057 if i != x.Args[1] { 10058 break 10059 } 10060 mem := x.Args[2] 10061 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 10062 break 10063 } 10064 v.reset(OpAMD64MOVLstoreconstidx1) 10065 v.AuxInt = 
makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 10066 v.Aux = s 10067 v.AddArg(p) 10068 v.AddArg(i) 10069 v.AddArg(mem) 10070 return true 10071 } 10072 return false 10073 } 10074 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool { 10075 b := v.Block 10076 _ = b 10077 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 10078 // cond: 10079 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10080 for { 10081 x := v.AuxInt 10082 sym := v.Aux 10083 v_0 := v.Args[0] 10084 if v_0.Op != OpAMD64ADDQconst { 10085 break 10086 } 10087 c := v_0.AuxInt 10088 ptr := v_0.Args[0] 10089 idx := v.Args[1] 10090 mem := v.Args[2] 10091 v.reset(OpAMD64MOVWstoreconstidx2) 10092 v.AuxInt = ValAndOff(x).add(c) 10093 v.Aux = sym 10094 v.AddArg(ptr) 10095 v.AddArg(idx) 10096 v.AddArg(mem) 10097 return true 10098 } 10099 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 10100 // cond: 10101 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 10102 for { 10103 x := v.AuxInt 10104 sym := v.Aux 10105 ptr := v.Args[0] 10106 v_1 := v.Args[1] 10107 if v_1.Op != OpAMD64ADDQconst { 10108 break 10109 } 10110 c := v_1.AuxInt 10111 idx := v_1.Args[0] 10112 mem := v.Args[2] 10113 v.reset(OpAMD64MOVWstoreconstidx2) 10114 v.AuxInt = ValAndOff(x).add(2 * c) 10115 v.Aux = sym 10116 v.AddArg(ptr) 10117 v.AddArg(idx) 10118 v.AddArg(mem) 10119 return true 10120 } 10121 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 10122 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 10123 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 10124 for { 10125 c := v.AuxInt 10126 s := v.Aux 10127 p := v.Args[0] 10128 i := v.Args[1] 10129 x := v.Args[2] 10130 if x.Op != OpAMD64MOVWstoreconstidx2 { 10131 break 10132 } 10133 a := x.AuxInt 10134 if x.Aux != s { 10135 break 10136 } 10137 if p != x.Args[0] { 10138 break 10139 } 10140 if i != x.Args[1] { 10141 break 10142 } 10143 mem := x.Args[2] 10144 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 10145 break 10146 } 10147 v.reset(OpAMD64MOVLstoreconstidx1) 10148 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 10149 v.Aux = s 10150 v.AddArg(p) 10151 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 10152 v0.AuxInt = 1 10153 v0.AddArg(i) 10154 v.AddArg(v0) 10155 v.AddArg(mem) 10156 return true 10157 } 10158 return false 10159 } 10160 func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool { 10161 b := v.Block 10162 _ = b 10163 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 10164 // cond: 10165 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 10166 for { 10167 c := v.AuxInt 10168 sym := v.Aux 10169 ptr := v.Args[0] 10170 v_1 := v.Args[1] 10171 if v_1.Op != OpAMD64SHLQconst { 10172 break 10173 } 10174 if v_1.AuxInt != 1 { 10175 break 10176 } 10177 idx := v_1.Args[0] 10178 val := v.Args[2] 10179 mem := v.Args[3] 10180 v.reset(OpAMD64MOVWstoreidx2) 10181 v.AuxInt = c 10182 v.Aux = sym 10183 v.AddArg(ptr) 10184 v.AddArg(idx) 10185 v.AddArg(val) 10186 v.AddArg(mem) 10187 return true 10188 } 10189 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10190 // cond: 10191 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 10192 for { 10193 c := v.AuxInt 10194 sym 
:= v.Aux 10195 v_0 := v.Args[0] 10196 if v_0.Op != OpAMD64ADDQconst { 10197 break 10198 } 10199 d := v_0.AuxInt 10200 ptr := v_0.Args[0] 10201 idx := v.Args[1] 10202 val := v.Args[2] 10203 mem := v.Args[3] 10204 v.reset(OpAMD64MOVWstoreidx1) 10205 v.AuxInt = c + d 10206 v.Aux = sym 10207 v.AddArg(ptr) 10208 v.AddArg(idx) 10209 v.AddArg(val) 10210 v.AddArg(mem) 10211 return true 10212 } 10213 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10214 // cond: 10215 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 10216 for { 10217 c := v.AuxInt 10218 sym := v.Aux 10219 ptr := v.Args[0] 10220 v_1 := v.Args[1] 10221 if v_1.Op != OpAMD64ADDQconst { 10222 break 10223 } 10224 d := v_1.AuxInt 10225 idx := v_1.Args[0] 10226 val := v.Args[2] 10227 mem := v.Args[3] 10228 v.reset(OpAMD64MOVWstoreidx1) 10229 v.AuxInt = c + d 10230 v.Aux = sym 10231 v.AddArg(ptr) 10232 v.AddArg(idx) 10233 v.AddArg(val) 10234 v.AddArg(mem) 10235 return true 10236 } 10237 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 10238 // cond: x.Uses == 1 && clobber(x) 10239 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 10240 for { 10241 i := v.AuxInt 10242 s := v.Aux 10243 p := v.Args[0] 10244 idx := v.Args[1] 10245 v_2 := v.Args[2] 10246 if v_2.Op != OpAMD64SHRQconst { 10247 break 10248 } 10249 if v_2.AuxInt != 16 { 10250 break 10251 } 10252 w := v_2.Args[0] 10253 x := v.Args[3] 10254 if x.Op != OpAMD64MOVWstoreidx1 { 10255 break 10256 } 10257 if x.AuxInt != i-2 { 10258 break 10259 } 10260 if x.Aux != s { 10261 break 10262 } 10263 if p != x.Args[0] { 10264 break 10265 } 10266 if idx != x.Args[1] { 10267 break 10268 } 10269 if w != x.Args[2] { 10270 break 10271 } 10272 mem := x.Args[3] 10273 if !(x.Uses == 1 && clobber(x)) { 10274 break 10275 } 10276 v.reset(OpAMD64MOVLstoreidx1) 10277 v.AuxInt = i - 2 10278 v.Aux = s 10279 v.AddArg(p) 10280 v.AddArg(idx) 10281 v.AddArg(w) 10282 v.AddArg(mem) 10283 return true 10284 } 10285 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 10286 // cond: x.Uses == 1 && clobber(x) 10287 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 10288 for { 10289 i := v.AuxInt 10290 s := v.Aux 10291 p := v.Args[0] 10292 idx := v.Args[1] 10293 v_2 := v.Args[2] 10294 if v_2.Op != OpAMD64SHRQconst { 10295 break 10296 } 10297 j := v_2.AuxInt 10298 w := v_2.Args[0] 10299 x := v.Args[3] 10300 if x.Op != OpAMD64MOVWstoreidx1 { 10301 break 10302 } 10303 if x.AuxInt != i-2 { 10304 break 10305 } 10306 if x.Aux != s { 10307 break 10308 } 10309 if p != x.Args[0] { 10310 break 10311 } 10312 if idx != x.Args[1] { 10313 break 10314 } 10315 w0 := x.Args[2] 10316 if w0.Op != OpAMD64SHRQconst { 10317 break 10318 } 10319 if w0.AuxInt != j-16 { 10320 break 10321 } 10322 if w != w0.Args[0] { 10323 break 10324 } 10325 mem := x.Args[3] 10326 if !(x.Uses == 1 && clobber(x)) { 10327 break 10328 } 10329 v.reset(OpAMD64MOVLstoreidx1) 10330 v.AuxInt = i - 2 10331 v.Aux = s 10332 v.AddArg(p) 10333 v.AddArg(idx) 10334 v.AddArg(w0) 10335 v.AddArg(mem) 10336 return true 10337 } 10338 return false 10339 } 10340 func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { 10341 b := v.Block 10342 _ = b 10343 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10344 // cond: 10345 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 10346 for { 10347 c := v.AuxInt 10348 sym := v.Aux 10349 v_0 := v.Args[0] 10350 if v_0.Op != OpAMD64ADDQconst { 10351 break 10352 } 
10353 d := v_0.AuxInt 10354 ptr := v_0.Args[0] 10355 idx := v.Args[1] 10356 val := v.Args[2] 10357 mem := v.Args[3] 10358 v.reset(OpAMD64MOVWstoreidx2) 10359 v.AuxInt = c + d 10360 v.Aux = sym 10361 v.AddArg(ptr) 10362 v.AddArg(idx) 10363 v.AddArg(val) 10364 v.AddArg(mem) 10365 return true 10366 } 10367 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10368 // cond: 10369 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 10370 for { 10371 c := v.AuxInt 10372 sym := v.Aux 10373 ptr := v.Args[0] 10374 v_1 := v.Args[1] 10375 if v_1.Op != OpAMD64ADDQconst { 10376 break 10377 } 10378 d := v_1.AuxInt 10379 idx := v_1.Args[0] 10380 val := v.Args[2] 10381 mem := v.Args[3] 10382 v.reset(OpAMD64MOVWstoreidx2) 10383 v.AuxInt = c + 2*d 10384 v.Aux = sym 10385 v.AddArg(ptr) 10386 v.AddArg(idx) 10387 v.AddArg(val) 10388 v.AddArg(mem) 10389 return true 10390 } 10391 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 10392 // cond: x.Uses == 1 && clobber(x) 10393 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 10394 for { 10395 i := v.AuxInt 10396 s := v.Aux 10397 p := v.Args[0] 10398 idx := v.Args[1] 10399 v_2 := v.Args[2] 10400 if v_2.Op != OpAMD64SHRQconst { 10401 break 10402 } 10403 if v_2.AuxInt != 16 { 10404 break 10405 } 10406 w := v_2.Args[0] 10407 x := v.Args[3] 10408 if x.Op != OpAMD64MOVWstoreidx2 { 10409 break 10410 } 10411 if x.AuxInt != i-2 { 10412 break 10413 } 10414 if x.Aux != s { 10415 break 10416 } 10417 if p != x.Args[0] { 10418 break 10419 } 10420 if idx != x.Args[1] { 10421 break 10422 } 10423 if w != x.Args[2] { 10424 break 10425 } 10426 mem := x.Args[3] 10427 if !(x.Uses == 1 && clobber(x)) { 10428 break 10429 } 10430 v.reset(OpAMD64MOVLstoreidx1) 10431 v.AuxInt = i - 2 10432 v.Aux = s 10433 v.AddArg(p) 10434 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 10435 v0.AuxInt = 1 10436 v0.AddArg(idx) 10437 v.AddArg(v0) 10438 v.AddArg(w) 10439 v.AddArg(mem) 10440 return true 10441 } 10442 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 10443 // cond: x.Uses == 1 && clobber(x) 10444 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 10445 for { 10446 i := v.AuxInt 10447 s := v.Aux 10448 p := v.Args[0] 10449 idx := v.Args[1] 10450 v_2 := v.Args[2] 10451 if v_2.Op != OpAMD64SHRQconst { 10452 break 10453 } 10454 j := v_2.AuxInt 10455 w := v_2.Args[0] 10456 x := v.Args[3] 10457 if x.Op != OpAMD64MOVWstoreidx2 { 10458 break 10459 } 10460 if x.AuxInt != i-2 { 10461 break 10462 } 10463 if x.Aux != s { 10464 break 10465 } 10466 if p != x.Args[0] { 10467 break 10468 } 10469 if idx != x.Args[1] { 10470 break 10471 } 10472 w0 := x.Args[2] 10473 if w0.Op != OpAMD64SHRQconst { 10474 break 10475 } 10476 if w0.AuxInt != j-16 { 10477 break 10478 } 10479 if w != w0.Args[0] { 10480 break 10481 } 10482 mem := x.Args[3] 10483 if !(x.Uses == 1 && clobber(x)) { 10484 break 10485 } 10486 v.reset(OpAMD64MOVLstoreidx1) 10487 v.AuxInt = i - 2 10488 v.Aux = s 10489 v.AddArg(p) 10490 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 10491 v0.AuxInt = 1 10492 v0.AddArg(idx) 10493 v.AddArg(v0) 10494 v.AddArg(w0) 10495 v.AddArg(mem) 10496 return true 10497 } 10498 return false 10499 } 10500 func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { 10501 b := v.Block 10502 _ = b 10503 // match: (MULL x (MOVLconst [c])) 10504 // cond: 10505 // result: (MULLconst [c] x) 10506 for { 10507 x := v.Args[0] 
10508 v_1 := v.Args[1] 10509 if v_1.Op != OpAMD64MOVLconst { 10510 break 10511 } 10512 c := v_1.AuxInt 10513 v.reset(OpAMD64MULLconst) 10514 v.AuxInt = c 10515 v.AddArg(x) 10516 return true 10517 } 10518 // match: (MULL (MOVLconst [c]) x) 10519 // cond: 10520 // result: (MULLconst [c] x) 10521 for { 10522 v_0 := v.Args[0] 10523 if v_0.Op != OpAMD64MOVLconst { 10524 break 10525 } 10526 c := v_0.AuxInt 10527 x := v.Args[1] 10528 v.reset(OpAMD64MULLconst) 10529 v.AuxInt = c 10530 v.AddArg(x) 10531 return true 10532 } 10533 return false 10534 } 10535 func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool { 10536 b := v.Block 10537 _ = b 10538 // match: (MULLconst [c] (MULLconst [d] x)) 10539 // cond: 10540 // result: (MULLconst [int64(int32(c * d))] x) 10541 for { 10542 c := v.AuxInt 10543 v_0 := v.Args[0] 10544 if v_0.Op != OpAMD64MULLconst { 10545 break 10546 } 10547 d := v_0.AuxInt 10548 x := v_0.Args[0] 10549 v.reset(OpAMD64MULLconst) 10550 v.AuxInt = int64(int32(c * d)) 10551 v.AddArg(x) 10552 return true 10553 } 10554 // match: (MULLconst [c] (MOVLconst [d])) 10555 // cond: 10556 // result: (MOVLconst [int64(int32(c*d))]) 10557 for { 10558 c := v.AuxInt 10559 v_0 := v.Args[0] 10560 if v_0.Op != OpAMD64MOVLconst { 10561 break 10562 } 10563 d := v_0.AuxInt 10564 v.reset(OpAMD64MOVLconst) 10565 v.AuxInt = int64(int32(c * d)) 10566 return true 10567 } 10568 return false 10569 } 10570 func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool { 10571 b := v.Block 10572 _ = b 10573 // match: (MULQ x (MOVQconst [c])) 10574 // cond: is32Bit(c) 10575 // result: (MULQconst [c] x) 10576 for { 10577 x := v.Args[0] 10578 v_1 := v.Args[1] 10579 if v_1.Op != OpAMD64MOVQconst { 10580 break 10581 } 10582 c := v_1.AuxInt 10583 if !(is32Bit(c)) { 10584 break 10585 } 10586 v.reset(OpAMD64MULQconst) 10587 v.AuxInt = c 10588 v.AddArg(x) 10589 return true 10590 } 10591 // match: (MULQ (MOVQconst [c]) x) 10592 // cond: is32Bit(c) 10593 // result: (MULQconst [c] x) 10594 for { 10595 v_0 := v.Args[0] 10596 if v_0.Op != OpAMD64MOVQconst { 10597 break 10598 } 10599 c := v_0.AuxInt 10600 x := v.Args[1] 10601 if !(is32Bit(c)) { 10602 break 10603 } 10604 v.reset(OpAMD64MULQconst) 10605 v.AuxInt = c 10606 v.AddArg(x) 10607 return true 10608 } 10609 return false 10610 } 10611 func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { 10612 b := v.Block 10613 _ = b 10614 // match: (MULQconst [c] (MULQconst [d] x)) 10615 // cond: is32Bit(c*d) 10616 // result: (MULQconst [c * d] x) 10617 for { 10618 c := v.AuxInt 10619 v_0 := v.Args[0] 10620 if v_0.Op != OpAMD64MULQconst { 10621 break 10622 } 10623 d := v_0.AuxInt 10624 x := v_0.Args[0] 10625 if !(is32Bit(c * d)) { 10626 break 10627 } 10628 v.reset(OpAMD64MULQconst) 10629 v.AuxInt = c * d 10630 v.AddArg(x) 10631 return true 10632 } 10633 // match: (MULQconst [-1] x) 10634 // cond: 10635 // result: (NEGQ x) 10636 for { 10637 if v.AuxInt != -1 { 10638 break 10639 } 10640 x := v.Args[0] 10641 v.reset(OpAMD64NEGQ) 10642 v.AddArg(x) 10643 return true 10644 } 10645 // match: (MULQconst [0] _) 10646 // cond: 10647 // result: (MOVQconst [0]) 10648 for { 10649 if v.AuxInt != 0 { 10650 break 10651 } 10652 v.reset(OpAMD64MOVQconst) 10653 v.AuxInt = 0 10654 return true 10655 } 10656 // match: (MULQconst [1] x) 10657 // cond: 10658 // result: x 10659 for { 10660 if v.AuxInt != 1 { 10661 break 10662 } 10663 x := v.Args[0] 10664 v.reset(OpCopy) 10665 v.Type = x.Type 10666 v.AddArg(x) 10667 return true 10668 } 10669 // match: (MULQconst [3] x) 10670 // 
cond: 10671 // result: (LEAQ2 x x) 10672 for { 10673 if v.AuxInt != 3 { 10674 break 10675 } 10676 x := v.Args[0] 10677 v.reset(OpAMD64LEAQ2) 10678 v.AddArg(x) 10679 v.AddArg(x) 10680 return true 10681 } 10682 // match: (MULQconst [5] x) 10683 // cond: 10684 // result: (LEAQ4 x x) 10685 for { 10686 if v.AuxInt != 5 { 10687 break 10688 } 10689 x := v.Args[0] 10690 v.reset(OpAMD64LEAQ4) 10691 v.AddArg(x) 10692 v.AddArg(x) 10693 return true 10694 } 10695 // match: (MULQconst [7] x) 10696 // cond: 10697 // result: (LEAQ8 (NEGQ <v.Type> x) x) 10698 for { 10699 if v.AuxInt != 7 { 10700 break 10701 } 10702 x := v.Args[0] 10703 v.reset(OpAMD64LEAQ8) 10704 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) 10705 v0.AddArg(x) 10706 v.AddArg(v0) 10707 v.AddArg(x) 10708 return true 10709 } 10710 // match: (MULQconst [9] x) 10711 // cond: 10712 // result: (LEAQ8 x x) 10713 for { 10714 if v.AuxInt != 9 { 10715 break 10716 } 10717 x := v.Args[0] 10718 v.reset(OpAMD64LEAQ8) 10719 v.AddArg(x) 10720 v.AddArg(x) 10721 return true 10722 } 10723 // match: (MULQconst [11] x) 10724 // cond: 10725 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 10726 for { 10727 if v.AuxInt != 11 { 10728 break 10729 } 10730 x := v.Args[0] 10731 v.reset(OpAMD64LEAQ2) 10732 v.AddArg(x) 10733 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 10734 v0.AddArg(x) 10735 v0.AddArg(x) 10736 v.AddArg(v0) 10737 return true 10738 } 10739 // match: (MULQconst [13] x) 10740 // cond: 10741 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 10742 for { 10743 if v.AuxInt != 13 { 10744 break 10745 } 10746 x := v.Args[0] 10747 v.reset(OpAMD64LEAQ4) 10748 v.AddArg(x) 10749 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 10750 v0.AddArg(x) 10751 v0.AddArg(x) 10752 v.AddArg(v0) 10753 return true 10754 } 10755 // match: (MULQconst [21] x) 10756 // cond: 10757 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 10758 for { 10759 if v.AuxInt != 21 { 10760 break 10761 } 10762 x := v.Args[0] 10763 v.reset(OpAMD64LEAQ4) 10764 v.AddArg(x) 10765 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 10766 v0.AddArg(x) 10767 v0.AddArg(x) 10768 v.AddArg(v0) 10769 return true 10770 } 10771 // match: (MULQconst [25] x) 10772 // cond: 10773 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 10774 for { 10775 if v.AuxInt != 25 { 10776 break 10777 } 10778 x := v.Args[0] 10779 v.reset(OpAMD64LEAQ8) 10780 v.AddArg(x) 10781 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 10782 v0.AddArg(x) 10783 v0.AddArg(x) 10784 v.AddArg(v0) 10785 return true 10786 } 10787 // match: (MULQconst [37] x) 10788 // cond: 10789 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 10790 for { 10791 if v.AuxInt != 37 { 10792 break 10793 } 10794 x := v.Args[0] 10795 v.reset(OpAMD64LEAQ4) 10796 v.AddArg(x) 10797 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 10798 v0.AddArg(x) 10799 v0.AddArg(x) 10800 v.AddArg(v0) 10801 return true 10802 } 10803 // match: (MULQconst [41] x) 10804 // cond: 10805 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 10806 for { 10807 if v.AuxInt != 41 { 10808 break 10809 } 10810 x := v.Args[0] 10811 v.reset(OpAMD64LEAQ8) 10812 v.AddArg(x) 10813 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 10814 v0.AddArg(x) 10815 v0.AddArg(x) 10816 v.AddArg(v0) 10817 return true 10818 } 10819 // match: (MULQconst [73] x) 10820 // cond: 10821 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 10822 for { 10823 if v.AuxInt != 73 { 10824 break 10825 } 10826 x := v.Args[0] 10827 v.reset(OpAMD64LEAQ8) 10828 v.AddArg(x) 10829 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 10830 v0.AddArg(x) 10831 v0.AddArg(x) 10832 v.AddArg(v0) 10833 return true 10834 } 10835 // 
match: (MULQconst [c] x) 10836 // cond: isPowerOfTwo(c) 10837 // result: (SHLQconst [log2(c)] x) 10838 for { 10839 c := v.AuxInt 10840 x := v.Args[0] 10841 if !(isPowerOfTwo(c)) { 10842 break 10843 } 10844 v.reset(OpAMD64SHLQconst) 10845 v.AuxInt = log2(c) 10846 v.AddArg(x) 10847 return true 10848 } 10849 // match: (MULQconst [c] x) 10850 // cond: isPowerOfTwo(c+1) && c >= 15 10851 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 10852 for { 10853 c := v.AuxInt 10854 x := v.Args[0] 10855 if !(isPowerOfTwo(c+1) && c >= 15) { 10856 break 10857 } 10858 v.reset(OpAMD64SUBQ) 10859 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 10860 v0.AuxInt = log2(c + 1) 10861 v0.AddArg(x) 10862 v.AddArg(v0) 10863 v.AddArg(x) 10864 return true 10865 } 10866 // match: (MULQconst [c] x) 10867 // cond: isPowerOfTwo(c-1) && c >= 17 10868 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 10869 for { 10870 c := v.AuxInt 10871 x := v.Args[0] 10872 if !(isPowerOfTwo(c-1) && c >= 17) { 10873 break 10874 } 10875 v.reset(OpAMD64LEAQ1) 10876 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 10877 v0.AuxInt = log2(c - 1) 10878 v0.AddArg(x) 10879 v.AddArg(v0) 10880 v.AddArg(x) 10881 return true 10882 } 10883 // match: (MULQconst [c] x) 10884 // cond: isPowerOfTwo(c-2) && c >= 34 10885 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 10886 for { 10887 c := v.AuxInt 10888 x := v.Args[0] 10889 if !(isPowerOfTwo(c-2) && c >= 34) { 10890 break 10891 } 10892 v.reset(OpAMD64LEAQ2) 10893 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 10894 v0.AuxInt = log2(c - 2) 10895 v0.AddArg(x) 10896 v.AddArg(v0) 10897 v.AddArg(x) 10898 return true 10899 } 10900 // match: (MULQconst [c] x) 10901 // cond: isPowerOfTwo(c-4) && c >= 68 10902 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 10903 for { 10904 c := v.AuxInt 10905 x := v.Args[0] 10906 if !(isPowerOfTwo(c-4) && c >= 68) { 10907 break 10908 } 10909 v.reset(OpAMD64LEAQ4) 10910 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 10911 v0.AuxInt = log2(c - 4) 10912 v0.AddArg(x) 10913 v.AddArg(v0) 10914 v.AddArg(x) 10915 return true 10916 } 10917 // match: (MULQconst [c] x) 10918 // cond: isPowerOfTwo(c-8) && c >= 136 10919 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 10920 for { 10921 c := v.AuxInt 10922 x := v.Args[0] 10923 if !(isPowerOfTwo(c-8) && c >= 136) { 10924 break 10925 } 10926 v.reset(OpAMD64LEAQ8) 10927 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 10928 v0.AuxInt = log2(c - 8) 10929 v0.AddArg(x) 10930 v.AddArg(v0) 10931 v.AddArg(x) 10932 return true 10933 } 10934 // match: (MULQconst [c] x) 10935 // cond: c%3 == 0 && isPowerOfTwo(c/3) 10936 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 10937 for { 10938 c := v.AuxInt 10939 x := v.Args[0] 10940 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 10941 break 10942 } 10943 v.reset(OpAMD64SHLQconst) 10944 v.AuxInt = log2(c / 3) 10945 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 10946 v0.AddArg(x) 10947 v0.AddArg(x) 10948 v.AddArg(v0) 10949 return true 10950 } 10951 // match: (MULQconst [c] x) 10952 // cond: c%5 == 0 && isPowerOfTwo(c/5) 10953 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 10954 for { 10955 c := v.AuxInt 10956 x := v.Args[0] 10957 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 10958 break 10959 } 10960 v.reset(OpAMD64SHLQconst) 10961 v.AuxInt = log2(c / 5) 10962 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 10963 v0.AddArg(x) 10964 v0.AddArg(x) 10965 v.AddArg(v0) 10966 return true 10967 } 10968 // match: (MULQconst [c] x) 10969 // cond: c%9 == 0 && isPowerOfTwo(c/9) 
10970 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 10971 for { 10972 c := v.AuxInt 10973 x := v.Args[0] 10974 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 10975 break 10976 } 10977 v.reset(OpAMD64SHLQconst) 10978 v.AuxInt = log2(c / 9) 10979 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 10980 v0.AddArg(x) 10981 v0.AddArg(x) 10982 v.AddArg(v0) 10983 return true 10984 } 10985 // match: (MULQconst [c] (MOVQconst [d])) 10986 // cond: 10987 // result: (MOVQconst [c*d]) 10988 for { 10989 c := v.AuxInt 10990 v_0 := v.Args[0] 10991 if v_0.Op != OpAMD64MOVQconst { 10992 break 10993 } 10994 d := v_0.AuxInt 10995 v.reset(OpAMD64MOVQconst) 10996 v.AuxInt = c * d 10997 return true 10998 } 10999 return false 11000 } 11001 func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool { 11002 b := v.Block 11003 _ = b 11004 // match: (NEGL (MOVLconst [c])) 11005 // cond: 11006 // result: (MOVLconst [int64(int32(-c))]) 11007 for { 11008 v_0 := v.Args[0] 11009 if v_0.Op != OpAMD64MOVLconst { 11010 break 11011 } 11012 c := v_0.AuxInt 11013 v.reset(OpAMD64MOVLconst) 11014 v.AuxInt = int64(int32(-c)) 11015 return true 11016 } 11017 return false 11018 } 11019 func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool { 11020 b := v.Block 11021 _ = b 11022 // match: (NEGQ (MOVQconst [c])) 11023 // cond: 11024 // result: (MOVQconst [-c]) 11025 for { 11026 v_0 := v.Args[0] 11027 if v_0.Op != OpAMD64MOVQconst { 11028 break 11029 } 11030 c := v_0.AuxInt 11031 v.reset(OpAMD64MOVQconst) 11032 v.AuxInt = -c 11033 return true 11034 } 11035 return false 11036 } 11037 func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool { 11038 b := v.Block 11039 _ = b 11040 // match: (NOTL (MOVLconst [c])) 11041 // cond: 11042 // result: (MOVLconst [^c]) 11043 for { 11044 v_0 := v.Args[0] 11045 if v_0.Op != OpAMD64MOVLconst { 11046 break 11047 } 11048 c := v_0.AuxInt 11049 v.reset(OpAMD64MOVLconst) 11050 v.AuxInt = ^c 11051 return true 11052 } 11053 return false 11054 } 11055 func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool { 11056 b := v.Block 11057 _ = b 11058 // match: (NOTQ (MOVQconst [c])) 11059 // cond: 11060 // result: (MOVQconst [^c]) 11061 for { 11062 v_0 := v.Args[0] 11063 if v_0.Op != OpAMD64MOVQconst { 11064 break 11065 } 11066 c := v_0.AuxInt 11067 v.reset(OpAMD64MOVQconst) 11068 v.AuxInt = ^c 11069 return true 11070 } 11071 return false 11072 } 11073 func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { 11074 b := v.Block 11075 _ = b 11076 // match: (ORL x (MOVLconst [c])) 11077 // cond: 11078 // result: (ORLconst [c] x) 11079 for { 11080 x := v.Args[0] 11081 v_1 := v.Args[1] 11082 if v_1.Op != OpAMD64MOVLconst { 11083 break 11084 } 11085 c := v_1.AuxInt 11086 v.reset(OpAMD64ORLconst) 11087 v.AuxInt = c 11088 v.AddArg(x) 11089 return true 11090 } 11091 // match: (ORL (MOVLconst [c]) x) 11092 // cond: 11093 // result: (ORLconst [c] x) 11094 for { 11095 v_0 := v.Args[0] 11096 if v_0.Op != OpAMD64MOVLconst { 11097 break 11098 } 11099 c := v_0.AuxInt 11100 x := v.Args[1] 11101 v.reset(OpAMD64ORLconst) 11102 v.AuxInt = c 11103 v.AddArg(x) 11104 return true 11105 } 11106 // match: ( ORL (SHLLconst x [c]) (SHRLconst x [32-c])) 11107 // cond: 11108 // result: (ROLLconst x [ c]) 11109 for { 11110 v_0 := v.Args[0] 11111 if v_0.Op != OpAMD64SHLLconst { 11112 break 11113 } 11114 c := v_0.AuxInt 11115 x := v_0.Args[0] 11116 v_1 := v.Args[1] 11117 if v_1.Op != OpAMD64SHRLconst { 11118 break 11119 } 11120 if v_1.AuxInt != 32-c { 11121 break 11122 } 11123 if x != v_1.Args[0] { 11124 
break 11125 } 11126 v.reset(OpAMD64ROLLconst) 11127 v.AuxInt = c 11128 v.AddArg(x) 11129 return true 11130 } 11131 // match: ( ORL (SHRLconst x [c]) (SHLLconst x [32-c])) 11132 // cond: 11133 // result: (ROLLconst x [32-c]) 11134 for { 11135 v_0 := v.Args[0] 11136 if v_0.Op != OpAMD64SHRLconst { 11137 break 11138 } 11139 c := v_0.AuxInt 11140 x := v_0.Args[0] 11141 v_1 := v.Args[1] 11142 if v_1.Op != OpAMD64SHLLconst { 11143 break 11144 } 11145 if v_1.AuxInt != 32-c { 11146 break 11147 } 11148 if x != v_1.Args[0] { 11149 break 11150 } 11151 v.reset(OpAMD64ROLLconst) 11152 v.AuxInt = 32 - c 11153 v.AddArg(x) 11154 return true 11155 } 11156 // match: ( ORL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) 11157 // cond: c < 16 && t.Size() == 2 11158 // result: (ROLWconst x [ c]) 11159 for { 11160 t := v.Type 11161 v_0 := v.Args[0] 11162 if v_0.Op != OpAMD64SHLLconst { 11163 break 11164 } 11165 c := v_0.AuxInt 11166 x := v_0.Args[0] 11167 v_1 := v.Args[1] 11168 if v_1.Op != OpAMD64SHRWconst { 11169 break 11170 } 11171 if v_1.AuxInt != 16-c { 11172 break 11173 } 11174 if x != v_1.Args[0] { 11175 break 11176 } 11177 if !(c < 16 && t.Size() == 2) { 11178 break 11179 } 11180 v.reset(OpAMD64ROLWconst) 11181 v.AuxInt = c 11182 v.AddArg(x) 11183 return true 11184 } 11185 // match: ( ORL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) 11186 // cond: c > 0 && t.Size() == 2 11187 // result: (ROLWconst x [16-c]) 11188 for { 11189 t := v.Type 11190 v_0 := v.Args[0] 11191 if v_0.Op != OpAMD64SHRWconst { 11192 break 11193 } 11194 c := v_0.AuxInt 11195 x := v_0.Args[0] 11196 v_1 := v.Args[1] 11197 if v_1.Op != OpAMD64SHLLconst { 11198 break 11199 } 11200 if v_1.AuxInt != 16-c { 11201 break 11202 } 11203 if x != v_1.Args[0] { 11204 break 11205 } 11206 if !(c > 0 && t.Size() == 2) { 11207 break 11208 } 11209 v.reset(OpAMD64ROLWconst) 11210 v.AuxInt = 16 - c 11211 v.AddArg(x) 11212 return true 11213 } 11214 // match: ( ORL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) 11215 // cond: c < 8 && t.Size() == 1 11216 // result: (ROLBconst x [ c]) 11217 for { 11218 t := v.Type 11219 v_0 := v.Args[0] 11220 if v_0.Op != OpAMD64SHLLconst { 11221 break 11222 } 11223 c := v_0.AuxInt 11224 x := v_0.Args[0] 11225 v_1 := v.Args[1] 11226 if v_1.Op != OpAMD64SHRBconst { 11227 break 11228 } 11229 if v_1.AuxInt != 8-c { 11230 break 11231 } 11232 if x != v_1.Args[0] { 11233 break 11234 } 11235 if !(c < 8 && t.Size() == 1) { 11236 break 11237 } 11238 v.reset(OpAMD64ROLBconst) 11239 v.AuxInt = c 11240 v.AddArg(x) 11241 return true 11242 } 11243 // match: ( ORL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) 11244 // cond: c > 0 && t.Size() == 1 11245 // result: (ROLBconst x [ 8-c]) 11246 for { 11247 t := v.Type 11248 v_0 := v.Args[0] 11249 if v_0.Op != OpAMD64SHRBconst { 11250 break 11251 } 11252 c := v_0.AuxInt 11253 x := v_0.Args[0] 11254 v_1 := v.Args[1] 11255 if v_1.Op != OpAMD64SHLLconst { 11256 break 11257 } 11258 if v_1.AuxInt != 8-c { 11259 break 11260 } 11261 if x != v_1.Args[0] { 11262 break 11263 } 11264 if !(c > 0 && t.Size() == 1) { 11265 break 11266 } 11267 v.reset(OpAMD64ROLBconst) 11268 v.AuxInt = 8 - c 11269 v.AddArg(x) 11270 return true 11271 } 11272 // match: (ORL x x) 11273 // cond: 11274 // result: x 11275 for { 11276 x := v.Args[0] 11277 if x != v.Args[1] { 11278 break 11279 } 11280 v.reset(OpCopy) 11281 v.Type = x.Type 11282 v.AddArg(x) 11283 return true 11284 } 11285 // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) 11286 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 11287 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) 11288 for { 11289 x0 := v.Args[0] 11290 if x0.Op != OpAMD64MOVBload { 11291 break 11292 } 11293 i := x0.AuxInt 11294 s := x0.Aux 11295 p := x0.Args[0] 11296 mem := x0.Args[1] 11297 s0 := v.Args[1] 11298 if s0.Op != OpAMD64SHLLconst { 11299 break 11300 } 11301 if s0.AuxInt != 8 { 11302 break 11303 } 11304 x1 := s0.Args[0] 11305 if x1.Op != OpAMD64MOVBload { 11306 break 11307 } 11308 if x1.AuxInt != i+1 { 11309 break 11310 } 11311 if x1.Aux != s { 11312 break 11313 } 11314 if p != x1.Args[0] { 11315 break 11316 } 11317 if mem != x1.Args[1] { 11318 break 11319 } 11320 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 11321 break 11322 } 11323 b = mergePoint(b, x0, x1) 11324 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 11325 v.reset(OpCopy) 11326 v.AddArg(v0) 11327 v0.AuxInt = i 11328 v0.Aux = s 11329 v0.AddArg(p) 11330 v0.AddArg(mem) 11331 return true 11332 } 11333 // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem))) 11334 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) 11335 // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem) 11336 for { 11337 o0 := v.Args[0] 11338 if o0.Op != OpAMD64ORL { 11339 break 11340 } 11341 x0 := o0.Args[0] 11342 if x0.Op != OpAMD64MOVWload { 11343 break 11344 } 11345 i := x0.AuxInt 11346 s := x0.Aux 11347 p := x0.Args[0] 11348 mem := x0.Args[1] 11349 s0 := o0.Args[1] 11350 if s0.Op != OpAMD64SHLLconst { 11351 break 11352 } 11353 if s0.AuxInt != 16 { 11354 break 11355 } 11356 x1 := s0.Args[0] 11357 if x1.Op != OpAMD64MOVBload { 11358 break 11359 } 11360 if x1.AuxInt != i+2 { 11361 break 11362 } 11363 if x1.Aux != s { 11364 break 11365 } 11366 if p != x1.Args[0] { 11367 break 11368 } 11369 if mem != x1.Args[1] { 11370 break 11371 } 11372 s1 := v.Args[1] 11373 if s1.Op != OpAMD64SHLLconst { 11374 break 11375 } 11376 if s1.AuxInt != 24 { 11377 break 11378 } 11379 x2 := s1.Args[0] 11380 if x2.Op != OpAMD64MOVBload { 11381 break 11382 } 11383 if x2.AuxInt != i+3 { 11384 break 11385 } 11386 if x2.Aux != s { 11387 break 11388 } 11389 if p != x2.Args[0] { 11390 break 11391 } 11392 if mem != x2.Args[1] { 11393 break 11394 } 11395 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { 11396 break 11397 } 11398 b = mergePoint(b, x0, x1, x2) 11399 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 11400 v.reset(OpCopy) 11401 v.AddArg(v0) 11402 v0.AuxInt = i 11403 v0.Aux = s 11404 v0.AddArg(p) 11405 v0.AddArg(mem) 11406 return true 11407 } 11408 // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) 11409 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 11410 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem) 11411 for { 11412 x0 := v.Args[0] 11413 if x0.Op != OpAMD64MOVBloadidx1 { 11414 break 11415 } 11416 i := x0.AuxInt 11417 s 
:= x0.Aux 11418 p := x0.Args[0] 11419 idx := x0.Args[1] 11420 mem := x0.Args[2] 11421 s0 := v.Args[1] 11422 if s0.Op != OpAMD64SHLLconst { 11423 break 11424 } 11425 if s0.AuxInt != 8 { 11426 break 11427 } 11428 x1 := s0.Args[0] 11429 if x1.Op != OpAMD64MOVBloadidx1 { 11430 break 11431 } 11432 if x1.AuxInt != i+1 { 11433 break 11434 } 11435 if x1.Aux != s { 11436 break 11437 } 11438 if p != x1.Args[0] { 11439 break 11440 } 11441 if idx != x1.Args[1] { 11442 break 11443 } 11444 if mem != x1.Args[2] { 11445 break 11446 } 11447 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 11448 break 11449 } 11450 b = mergePoint(b, x0, x1) 11451 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 11452 v.reset(OpCopy) 11453 v.AddArg(v0) 11454 v0.AuxInt = i 11455 v0.Aux = s 11456 v0.AddArg(p) 11457 v0.AddArg(idx) 11458 v0.AddArg(mem) 11459 return true 11460 } 11461 // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem))) 11462 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) 11463 // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem) 11464 for { 11465 o0 := v.Args[0] 11466 if o0.Op != OpAMD64ORL { 11467 break 11468 } 11469 x0 := o0.Args[0] 11470 if x0.Op != OpAMD64MOVWloadidx1 { 11471 break 11472 } 11473 i := x0.AuxInt 11474 s := x0.Aux 11475 p := x0.Args[0] 11476 idx := x0.Args[1] 11477 mem := x0.Args[2] 11478 s0 := o0.Args[1] 11479 if s0.Op != OpAMD64SHLLconst { 11480 break 11481 } 11482 if s0.AuxInt != 16 { 11483 break 11484 } 11485 x1 := s0.Args[0] 11486 if x1.Op != OpAMD64MOVBloadidx1 { 11487 break 11488 } 11489 if x1.AuxInt != i+2 { 11490 break 11491 } 11492 if x1.Aux != s { 11493 break 11494 } 11495 if p != x1.Args[0] { 11496 break 11497 } 11498 if idx != x1.Args[1] { 11499 break 11500 } 11501 if mem != x1.Args[2] { 11502 break 11503 } 11504 s1 := v.Args[1] 11505 if s1.Op != OpAMD64SHLLconst { 11506 break 11507 } 11508 if s1.AuxInt != 24 { 11509 break 11510 } 11511 x2 := s1.Args[0] 11512 if x2.Op != OpAMD64MOVBloadidx1 { 11513 break 11514 } 11515 if x2.AuxInt != i+3 { 11516 break 11517 } 11518 if x2.Aux != s { 11519 break 11520 } 11521 if p != x2.Args[0] { 11522 break 11523 } 11524 if idx != x2.Args[1] { 11525 break 11526 } 11527 if mem != x2.Args[2] { 11528 break 11529 } 11530 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { 11531 break 11532 } 11533 b = mergePoint(b, x0, x1, x2) 11534 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 11535 v.reset(OpCopy) 11536 v.AddArg(v0) 11537 v0.AuxInt = i 11538 v0.Aux = s 11539 v0.AddArg(p) 11540 v0.AddArg(idx) 11541 v0.AddArg(mem) 11542 return true 11543 } 11544 // match: (ORL o1:(ORL o0:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i-1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i-2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i-3] {s} p mem))) 11545 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) 
&& clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) 11546 // result: @mergePoint(b,x0,x1,x2,x3) (BSWAPL <v.Type> (MOVLload [i-3] {s} p mem)) 11547 for { 11548 o1 := v.Args[0] 11549 if o1.Op != OpAMD64ORL { 11550 break 11551 } 11552 o0 := o1.Args[0] 11553 if o0.Op != OpAMD64ORL { 11554 break 11555 } 11556 x0 := o0.Args[0] 11557 if x0.Op != OpAMD64MOVBload { 11558 break 11559 } 11560 i := x0.AuxInt 11561 s := x0.Aux 11562 p := x0.Args[0] 11563 mem := x0.Args[1] 11564 s0 := o0.Args[1] 11565 if s0.Op != OpAMD64SHLLconst { 11566 break 11567 } 11568 if s0.AuxInt != 8 { 11569 break 11570 } 11571 x1 := s0.Args[0] 11572 if x1.Op != OpAMD64MOVBload { 11573 break 11574 } 11575 if x1.AuxInt != i-1 { 11576 break 11577 } 11578 if x1.Aux != s { 11579 break 11580 } 11581 if p != x1.Args[0] { 11582 break 11583 } 11584 if mem != x1.Args[1] { 11585 break 11586 } 11587 s1 := o1.Args[1] 11588 if s1.Op != OpAMD64SHLLconst { 11589 break 11590 } 11591 if s1.AuxInt != 16 { 11592 break 11593 } 11594 x2 := s1.Args[0] 11595 if x2.Op != OpAMD64MOVBload { 11596 break 11597 } 11598 if x2.AuxInt != i-2 { 11599 break 11600 } 11601 if x2.Aux != s { 11602 break 11603 } 11604 if p != x2.Args[0] { 11605 break 11606 } 11607 if mem != x2.Args[1] { 11608 break 11609 } 11610 s2 := v.Args[1] 11611 if s2.Op != OpAMD64SHLLconst { 11612 break 11613 } 11614 if s2.AuxInt != 24 { 11615 break 11616 } 11617 x3 := s2.Args[0] 11618 if x3.Op != OpAMD64MOVBload { 11619 break 11620 } 11621 if x3.AuxInt != i-3 { 11622 break 11623 } 11624 if x3.Aux != s { 11625 break 11626 } 11627 if p != x3.Args[0] { 11628 break 11629 } 11630 if mem != x3.Args[1] { 11631 break 11632 } 11633 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { 11634 break 11635 } 11636 b = mergePoint(b, x0, x1, x2, x3) 11637 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 11638 v.reset(OpCopy) 11639 v.AddArg(v0) 11640 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 11641 v1.AuxInt = i - 3 11642 v1.Aux = s 11643 v1.AddArg(p) 11644 v1.AddArg(mem) 11645 v0.AddArg(v1) 11646 return true 11647 } 11648 // match: (ORL o1:(ORL o0:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem))) 11649 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) 11650 // result: @mergePoint(b,x0,x1,x2,x3) (BSWAPL <v.Type> (MOVLloadidx1 <v.Type> [i-3] {s} p idx mem)) 11651 for { 11652 o1 := v.Args[0] 11653 if o1.Op != OpAMD64ORL { 11654 break 11655 } 11656 o0 := o1.Args[0] 11657 if o0.Op != OpAMD64ORL { 11658 break 11659 } 11660 x0 := o0.Args[0] 11661 if x0.Op != OpAMD64MOVBloadidx1 { 11662 break 11663 } 11664 i := x0.AuxInt 11665 s := x0.Aux 11666 p := x0.Args[0] 11667 idx := x0.Args[1] 11668 mem := x0.Args[2] 11669 s0 := o0.Args[1] 11670 if s0.Op != OpAMD64SHLLconst { 11671 break 11672 } 11673 if s0.AuxInt != 8 { 11674 break 11675 } 11676 x1 := 
s0.Args[0] 11677 if x1.Op != OpAMD64MOVBloadidx1 { 11678 break 11679 } 11680 if x1.AuxInt != i-1 { 11681 break 11682 } 11683 if x1.Aux != s { 11684 break 11685 } 11686 if p != x1.Args[0] { 11687 break 11688 } 11689 if idx != x1.Args[1] { 11690 break 11691 } 11692 if mem != x1.Args[2] { 11693 break 11694 } 11695 s1 := o1.Args[1] 11696 if s1.Op != OpAMD64SHLLconst { 11697 break 11698 } 11699 if s1.AuxInt != 16 { 11700 break 11701 } 11702 x2 := s1.Args[0] 11703 if x2.Op != OpAMD64MOVBloadidx1 { 11704 break 11705 } 11706 if x2.AuxInt != i-2 { 11707 break 11708 } 11709 if x2.Aux != s { 11710 break 11711 } 11712 if p != x2.Args[0] { 11713 break 11714 } 11715 if idx != x2.Args[1] { 11716 break 11717 } 11718 if mem != x2.Args[2] { 11719 break 11720 } 11721 s2 := v.Args[1] 11722 if s2.Op != OpAMD64SHLLconst { 11723 break 11724 } 11725 if s2.AuxInt != 24 { 11726 break 11727 } 11728 x3 := s2.Args[0] 11729 if x3.Op != OpAMD64MOVBloadidx1 { 11730 break 11731 } 11732 if x3.AuxInt != i-3 { 11733 break 11734 } 11735 if x3.Aux != s { 11736 break 11737 } 11738 if p != x3.Args[0] { 11739 break 11740 } 11741 if idx != x3.Args[1] { 11742 break 11743 } 11744 if mem != x3.Args[2] { 11745 break 11746 } 11747 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { 11748 break 11749 } 11750 b = mergePoint(b, x0, x1, x2, x3) 11751 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 11752 v.reset(OpCopy) 11753 v.AddArg(v0) 11754 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 11755 v1.AuxInt = i - 3 11756 v1.Aux = s 11757 v1.AddArg(p) 11758 v1.AddArg(idx) 11759 v1.AddArg(mem) 11760 v0.AddArg(v1) 11761 return true 11762 } 11763 return false 11764 } 11765 func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool { 11766 b := v.Block 11767 _ = b 11768 // match: (ORLconst [c] x) 11769 // cond: int32(c)==0 11770 // result: x 11771 for { 11772 c := v.AuxInt 11773 x := v.Args[0] 11774 if !(int32(c) == 0) { 11775 break 11776 } 11777 v.reset(OpCopy) 11778 v.Type = x.Type 11779 v.AddArg(x) 11780 return true 11781 } 11782 // match: (ORLconst [c] _) 11783 // cond: int32(c)==-1 11784 // result: (MOVLconst [-1]) 11785 for { 11786 c := v.AuxInt 11787 if !(int32(c) == -1) { 11788 break 11789 } 11790 v.reset(OpAMD64MOVLconst) 11791 v.AuxInt = -1 11792 return true 11793 } 11794 // match: (ORLconst [c] (MOVLconst [d])) 11795 // cond: 11796 // result: (MOVLconst [c|d]) 11797 for { 11798 c := v.AuxInt 11799 v_0 := v.Args[0] 11800 if v_0.Op != OpAMD64MOVLconst { 11801 break 11802 } 11803 d := v_0.AuxInt 11804 v.reset(OpAMD64MOVLconst) 11805 v.AuxInt = c | d 11806 return true 11807 } 11808 return false 11809 } 11810 func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { 11811 b := v.Block 11812 _ = b 11813 // match: (ORQ x (MOVQconst [c])) 11814 // cond: is32Bit(c) 11815 // result: (ORQconst [c] x) 11816 for { 11817 x := v.Args[0] 11818 v_1 := v.Args[1] 11819 if v_1.Op != OpAMD64MOVQconst { 11820 break 11821 } 11822 c := v_1.AuxInt 11823 if !(is32Bit(c)) { 11824 break 11825 } 11826 v.reset(OpAMD64ORQconst) 11827 v.AuxInt = c 11828 v.AddArg(x) 11829 return true 11830 } 11831 // match: (ORQ (MOVQconst [c]) x) 11832 // cond: is32Bit(c) 11833 // result: (ORQconst [c] x) 11834 for { 11835 v_0 := v.Args[0] 11836 if v_0.Op != OpAMD64MOVQconst { 11837 break 
11838 } 11839 c := v_0.AuxInt 11840 x := v.Args[1] 11841 if !(is32Bit(c)) { 11842 break 11843 } 11844 v.reset(OpAMD64ORQconst) 11845 v.AuxInt = c 11846 v.AddArg(x) 11847 return true 11848 } 11849 // match: ( ORQ (SHLQconst x [c]) (SHRQconst x [64-c])) 11850 // cond: 11851 // result: (ROLQconst x [ c]) 11852 for { 11853 v_0 := v.Args[0] 11854 if v_0.Op != OpAMD64SHLQconst { 11855 break 11856 } 11857 c := v_0.AuxInt 11858 x := v_0.Args[0] 11859 v_1 := v.Args[1] 11860 if v_1.Op != OpAMD64SHRQconst { 11861 break 11862 } 11863 if v_1.AuxInt != 64-c { 11864 break 11865 } 11866 if x != v_1.Args[0] { 11867 break 11868 } 11869 v.reset(OpAMD64ROLQconst) 11870 v.AuxInt = c 11871 v.AddArg(x) 11872 return true 11873 } 11874 // match: ( ORQ (SHRQconst x [c]) (SHLQconst x [64-c])) 11875 // cond: 11876 // result: (ROLQconst x [64-c]) 11877 for { 11878 v_0 := v.Args[0] 11879 if v_0.Op != OpAMD64SHRQconst { 11880 break 11881 } 11882 c := v_0.AuxInt 11883 x := v_0.Args[0] 11884 v_1 := v.Args[1] 11885 if v_1.Op != OpAMD64SHLQconst { 11886 break 11887 } 11888 if v_1.AuxInt != 64-c { 11889 break 11890 } 11891 if x != v_1.Args[0] { 11892 break 11893 } 11894 v.reset(OpAMD64ROLQconst) 11895 v.AuxInt = 64 - c 11896 v.AddArg(x) 11897 return true 11898 } 11899 // match: (ORQ x x) 11900 // cond: 11901 // result: x 11902 for { 11903 x := v.Args[0] 11904 if x != v.Args[1] { 11905 break 11906 } 11907 v.reset(OpCopy) 11908 v.Type = x.Type 11909 v.AddArg(x) 11910 return true 11911 } 11912 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) 11913 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 11914 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) 11915 for { 11916 o0 := v.Args[0] 11917 if o0.Op != OpAMD64ORQ { 11918 break 11919 } 11920 o1 := o0.Args[0] 11921 if o1.Op != OpAMD64ORQ { 11922 break 11923 } 11924 o2 := o1.Args[0] 11925 if o2.Op != OpAMD64ORQ { 11926 break 11927 } 11928 o3 := o2.Args[0] 11929 if o3.Op != OpAMD64ORQ { 11930 break 11931 } 11932 o4 := o3.Args[0] 11933 if o4.Op != OpAMD64ORQ { 11934 break 11935 } 11936 o5 := o4.Args[0] 11937 if o5.Op != OpAMD64ORQ { 11938 break 11939 } 11940 x0 := o5.Args[0] 11941 if x0.Op != OpAMD64MOVBload { 11942 break 11943 } 11944 i := x0.AuxInt 11945 s := x0.Aux 11946 p := x0.Args[0] 11947 mem := x0.Args[1] 11948 s0 := o5.Args[1] 11949 if s0.Op != OpAMD64SHLQconst { 11950 break 11951 } 11952 if s0.AuxInt != 8 { 11953 break 11954 } 11955 x1 := s0.Args[0] 11956 if x1.Op != OpAMD64MOVBload { 11957 break 11958 } 11959 if 
x1.AuxInt != i+1 { 11960 break 11961 } 11962 if x1.Aux != s { 11963 break 11964 } 11965 if p != x1.Args[0] { 11966 break 11967 } 11968 if mem != x1.Args[1] { 11969 break 11970 } 11971 s1 := o4.Args[1] 11972 if s1.Op != OpAMD64SHLQconst { 11973 break 11974 } 11975 if s1.AuxInt != 16 { 11976 break 11977 } 11978 x2 := s1.Args[0] 11979 if x2.Op != OpAMD64MOVBload { 11980 break 11981 } 11982 if x2.AuxInt != i+2 { 11983 break 11984 } 11985 if x2.Aux != s { 11986 break 11987 } 11988 if p != x2.Args[0] { 11989 break 11990 } 11991 if mem != x2.Args[1] { 11992 break 11993 } 11994 s2 := o3.Args[1] 11995 if s2.Op != OpAMD64SHLQconst { 11996 break 11997 } 11998 if s2.AuxInt != 24 { 11999 break 12000 } 12001 x3 := s2.Args[0] 12002 if x3.Op != OpAMD64MOVBload { 12003 break 12004 } 12005 if x3.AuxInt != i+3 { 12006 break 12007 } 12008 if x3.Aux != s { 12009 break 12010 } 12011 if p != x3.Args[0] { 12012 break 12013 } 12014 if mem != x3.Args[1] { 12015 break 12016 } 12017 s3 := o2.Args[1] 12018 if s3.Op != OpAMD64SHLQconst { 12019 break 12020 } 12021 if s3.AuxInt != 32 { 12022 break 12023 } 12024 x4 := s3.Args[0] 12025 if x4.Op != OpAMD64MOVBload { 12026 break 12027 } 12028 if x4.AuxInt != i+4 { 12029 break 12030 } 12031 if x4.Aux != s { 12032 break 12033 } 12034 if p != x4.Args[0] { 12035 break 12036 } 12037 if mem != x4.Args[1] { 12038 break 12039 } 12040 s4 := o1.Args[1] 12041 if s4.Op != OpAMD64SHLQconst { 12042 break 12043 } 12044 if s4.AuxInt != 40 { 12045 break 12046 } 12047 x5 := s4.Args[0] 12048 if x5.Op != OpAMD64MOVBload { 12049 break 12050 } 12051 if x5.AuxInt != i+5 { 12052 break 12053 } 12054 if x5.Aux != s { 12055 break 12056 } 12057 if p != x5.Args[0] { 12058 break 12059 } 12060 if mem != x5.Args[1] { 12061 break 12062 } 12063 s5 := o0.Args[1] 12064 if s5.Op != OpAMD64SHLQconst { 12065 break 12066 } 12067 if s5.AuxInt != 48 { 12068 break 12069 } 12070 x6 := s5.Args[0] 12071 if x6.Op != OpAMD64MOVBload { 12072 break 12073 } 12074 if x6.AuxInt != i+6 { 12075 break 12076 } 12077 if x6.Aux != s { 12078 break 12079 } 12080 if p != x6.Args[0] { 12081 break 12082 } 12083 if mem != x6.Args[1] { 12084 break 12085 } 12086 s6 := v.Args[1] 12087 if s6.Op != OpAMD64SHLQconst { 12088 break 12089 } 12090 if s6.AuxInt != 56 { 12091 break 12092 } 12093 x7 := s6.Args[0] 12094 if x7.Op != OpAMD64MOVBload { 12095 break 12096 } 12097 if x7.AuxInt != i+7 { 12098 break 12099 } 12100 if x7.Aux != s { 12101 break 12102 } 12103 if p != x7.Args[0] { 12104 break 12105 } 12106 if mem != x7.Args[1] { 12107 break 12108 } 12109 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 12110 break 12111 } 12112 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 12113 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 12114 v.reset(OpCopy) 12115 v.AddArg(v0) 12116 v0.AuxInt = i 12117 v0.Aux = s 12118 v0.AddArg(p) 12119 v0.AddArg(mem) 12120 return true 12121 } 12122 // 
match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) 12123 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 12124 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem) 12125 for { 12126 o0 := v.Args[0] 12127 if o0.Op != OpAMD64ORQ { 12128 break 12129 } 12130 o1 := o0.Args[0] 12131 if o1.Op != OpAMD64ORQ { 12132 break 12133 } 12134 o2 := o1.Args[0] 12135 if o2.Op != OpAMD64ORQ { 12136 break 12137 } 12138 o3 := o2.Args[0] 12139 if o3.Op != OpAMD64ORQ { 12140 break 12141 } 12142 o4 := o3.Args[0] 12143 if o4.Op != OpAMD64ORQ { 12144 break 12145 } 12146 o5 := o4.Args[0] 12147 if o5.Op != OpAMD64ORQ { 12148 break 12149 } 12150 x0 := o5.Args[0] 12151 if x0.Op != OpAMD64MOVBloadidx1 { 12152 break 12153 } 12154 i := x0.AuxInt 12155 s := x0.Aux 12156 p := x0.Args[0] 12157 idx := x0.Args[1] 12158 mem := x0.Args[2] 12159 s0 := o5.Args[1] 12160 if s0.Op != OpAMD64SHLQconst { 12161 break 12162 } 12163 if s0.AuxInt != 8 { 12164 break 12165 } 12166 x1 := s0.Args[0] 12167 if x1.Op != OpAMD64MOVBloadidx1 { 12168 break 12169 } 12170 if x1.AuxInt != i+1 { 12171 break 12172 } 12173 if x1.Aux != s { 12174 break 12175 } 12176 if p != x1.Args[0] { 12177 break 12178 } 12179 if idx != x1.Args[1] { 12180 break 12181 } 12182 if mem != x1.Args[2] { 12183 break 12184 } 12185 s1 := o4.Args[1] 12186 if s1.Op != OpAMD64SHLQconst { 12187 break 12188 } 12189 if s1.AuxInt != 16 { 12190 break 12191 } 12192 x2 := s1.Args[0] 12193 if x2.Op != OpAMD64MOVBloadidx1 { 12194 break 12195 } 12196 if x2.AuxInt != i+2 { 12197 break 12198 } 12199 if x2.Aux != s { 12200 break 12201 } 12202 if p != x2.Args[0] { 12203 break 12204 } 12205 if idx != x2.Args[1] { 12206 break 12207 } 12208 if mem != x2.Args[2] { 12209 break 12210 } 12211 s2 := o3.Args[1] 12212 if s2.Op != OpAMD64SHLQconst { 12213 break 12214 } 12215 if s2.AuxInt != 24 { 12216 break 12217 } 12218 x3 := s2.Args[0] 12219 if x3.Op != OpAMD64MOVBloadidx1 { 12220 break 12221 } 12222 if x3.AuxInt != i+3 { 12223 break 12224 } 12225 if x3.Aux != s { 12226 break 12227 } 12228 if p != x3.Args[0] { 12229 break 12230 } 12231 if idx != x3.Args[1] { 12232 break 12233 } 12234 if mem != x3.Args[2] { 12235 break 12236 } 12237 s3 := o2.Args[1] 12238 if s3.Op != OpAMD64SHLQconst { 12239 break 12240 } 12241 if s3.AuxInt != 32 { 12242 break 12243 } 12244 x4 := s3.Args[0] 12245 if x4.Op != OpAMD64MOVBloadidx1 { 12246 break 12247 } 12248 if x4.AuxInt != i+4 { 12249 break 
12250 } 12251 if x4.Aux != s { 12252 break 12253 } 12254 if p != x4.Args[0] { 12255 break 12256 } 12257 if idx != x4.Args[1] { 12258 break 12259 } 12260 if mem != x4.Args[2] { 12261 break 12262 } 12263 s4 := o1.Args[1] 12264 if s4.Op != OpAMD64SHLQconst { 12265 break 12266 } 12267 if s4.AuxInt != 40 { 12268 break 12269 } 12270 x5 := s4.Args[0] 12271 if x5.Op != OpAMD64MOVBloadidx1 { 12272 break 12273 } 12274 if x5.AuxInt != i+5 { 12275 break 12276 } 12277 if x5.Aux != s { 12278 break 12279 } 12280 if p != x5.Args[0] { 12281 break 12282 } 12283 if idx != x5.Args[1] { 12284 break 12285 } 12286 if mem != x5.Args[2] { 12287 break 12288 } 12289 s5 := o0.Args[1] 12290 if s5.Op != OpAMD64SHLQconst { 12291 break 12292 } 12293 if s5.AuxInt != 48 { 12294 break 12295 } 12296 x6 := s5.Args[0] 12297 if x6.Op != OpAMD64MOVBloadidx1 { 12298 break 12299 } 12300 if x6.AuxInt != i+6 { 12301 break 12302 } 12303 if x6.Aux != s { 12304 break 12305 } 12306 if p != x6.Args[0] { 12307 break 12308 } 12309 if idx != x6.Args[1] { 12310 break 12311 } 12312 if mem != x6.Args[2] { 12313 break 12314 } 12315 s6 := v.Args[1] 12316 if s6.Op != OpAMD64SHLQconst { 12317 break 12318 } 12319 if s6.AuxInt != 56 { 12320 break 12321 } 12322 x7 := s6.Args[0] 12323 if x7.Op != OpAMD64MOVBloadidx1 { 12324 break 12325 } 12326 if x7.AuxInt != i+7 { 12327 break 12328 } 12329 if x7.Aux != s { 12330 break 12331 } 12332 if p != x7.Args[0] { 12333 break 12334 } 12335 if idx != x7.Args[1] { 12336 break 12337 } 12338 if mem != x7.Args[2] { 12339 break 12340 } 12341 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 12342 break 12343 } 12344 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 12345 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, v.Type) 12346 v.reset(OpCopy) 12347 v.AddArg(v0) 12348 v0.AuxInt = i 12349 v0.Aux = s 12350 v0.AddArg(p) 12351 v0.AddArg(idx) 12352 v0.AddArg(mem) 12353 return true 12354 } 12355 // match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i-1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i-2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i-3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i-4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i-5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i-6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i-7] {s} p mem))) 12356 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && 
clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 12357 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQload [i-7] {s} p mem)) 12358 for { 12359 o5 := v.Args[0] 12360 if o5.Op != OpAMD64ORQ { 12361 break 12362 } 12363 o4 := o5.Args[0] 12364 if o4.Op != OpAMD64ORQ { 12365 break 12366 } 12367 o3 := o4.Args[0] 12368 if o3.Op != OpAMD64ORQ { 12369 break 12370 } 12371 o2 := o3.Args[0] 12372 if o2.Op != OpAMD64ORQ { 12373 break 12374 } 12375 o1 := o2.Args[0] 12376 if o1.Op != OpAMD64ORQ { 12377 break 12378 } 12379 o0 := o1.Args[0] 12380 if o0.Op != OpAMD64ORQ { 12381 break 12382 } 12383 x0 := o0.Args[0] 12384 if x0.Op != OpAMD64MOVBload { 12385 break 12386 } 12387 i := x0.AuxInt 12388 s := x0.Aux 12389 p := x0.Args[0] 12390 mem := x0.Args[1] 12391 s0 := o0.Args[1] 12392 if s0.Op != OpAMD64SHLQconst { 12393 break 12394 } 12395 if s0.AuxInt != 8 { 12396 break 12397 } 12398 x1 := s0.Args[0] 12399 if x1.Op != OpAMD64MOVBload { 12400 break 12401 } 12402 if x1.AuxInt != i-1 { 12403 break 12404 } 12405 if x1.Aux != s { 12406 break 12407 } 12408 if p != x1.Args[0] { 12409 break 12410 } 12411 if mem != x1.Args[1] { 12412 break 12413 } 12414 s1 := o1.Args[1] 12415 if s1.Op != OpAMD64SHLQconst { 12416 break 12417 } 12418 if s1.AuxInt != 16 { 12419 break 12420 } 12421 x2 := s1.Args[0] 12422 if x2.Op != OpAMD64MOVBload { 12423 break 12424 } 12425 if x2.AuxInt != i-2 { 12426 break 12427 } 12428 if x2.Aux != s { 12429 break 12430 } 12431 if p != x2.Args[0] { 12432 break 12433 } 12434 if mem != x2.Args[1] { 12435 break 12436 } 12437 s2 := o2.Args[1] 12438 if s2.Op != OpAMD64SHLQconst { 12439 break 12440 } 12441 if s2.AuxInt != 24 { 12442 break 12443 } 12444 x3 := s2.Args[0] 12445 if x3.Op != OpAMD64MOVBload { 12446 break 12447 } 12448 if x3.AuxInt != i-3 { 12449 break 12450 } 12451 if x3.Aux != s { 12452 break 12453 } 12454 if p != x3.Args[0] { 12455 break 12456 } 12457 if mem != x3.Args[1] { 12458 break 12459 } 12460 s3 := o3.Args[1] 12461 if s3.Op != OpAMD64SHLQconst { 12462 break 12463 } 12464 if s3.AuxInt != 32 { 12465 break 12466 } 12467 x4 := s3.Args[0] 12468 if x4.Op != OpAMD64MOVBload { 12469 break 12470 } 12471 if x4.AuxInt != i-4 { 12472 break 12473 } 12474 if x4.Aux != s { 12475 break 12476 } 12477 if p != x4.Args[0] { 12478 break 12479 } 12480 if mem != x4.Args[1] { 12481 break 12482 } 12483 s4 := o4.Args[1] 12484 if s4.Op != OpAMD64SHLQconst { 12485 break 12486 } 12487 if s4.AuxInt != 40 { 12488 break 12489 } 12490 x5 := s4.Args[0] 12491 if x5.Op != OpAMD64MOVBload { 12492 break 12493 } 12494 if x5.AuxInt != i-5 { 12495 break 12496 } 12497 if x5.Aux != s { 12498 break 12499 } 12500 if p != x5.Args[0] { 12501 break 12502 } 12503 if mem != x5.Args[1] { 12504 break 12505 } 12506 s5 := o5.Args[1] 12507 if s5.Op != OpAMD64SHLQconst { 12508 break 12509 } 12510 if s5.AuxInt != 48 { 12511 break 12512 } 12513 x6 := s5.Args[0] 12514 if x6.Op != OpAMD64MOVBload { 12515 break 12516 } 12517 if x6.AuxInt != i-6 { 12518 break 12519 } 12520 if x6.Aux != s { 12521 break 12522 } 12523 if p != x6.Args[0] { 12524 break 12525 } 12526 if mem != x6.Args[1] { 12527 break 12528 } 12529 s6 := v.Args[1] 12530 if s6.Op != OpAMD64SHLQconst { 12531 break 12532 } 12533 if s6.AuxInt != 56 { 12534 break 12535 } 12536 x7 := s6.Args[0] 12537 if x7.Op != OpAMD64MOVBload { 12538 break 12539 } 12540 if x7.AuxInt != i-7 { 12541 break 12542 } 12543 if x7.Aux != s { 12544 break 12545 } 12546 if p != 
x7.Args[0] { 12547 break 12548 } 12549 if mem != x7.Args[1] { 12550 break 12551 } 12552 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 12553 break 12554 } 12555 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 12556 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 12557 v.reset(OpCopy) 12558 v.AddArg(v0) 12559 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 12560 v1.AuxInt = i - 7 12561 v1.Aux = s 12562 v1.AddArg(p) 12563 v1.AddArg(mem) 12564 v0.AddArg(v1) 12565 return true 12566 } 12567 // match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i-4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i-5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i-6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i-7] {s} p idx mem))) 12568 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 12569 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQloadidx1 <v.Type> [i-7] {s} p idx mem)) 12570 for { 12571 o5 := v.Args[0] 12572 if o5.Op != OpAMD64ORQ { 12573 break 12574 } 12575 o4 := o5.Args[0] 12576 if o4.Op != OpAMD64ORQ { 12577 break 12578 } 12579 o3 := o4.Args[0] 12580 if o3.Op != OpAMD64ORQ { 12581 break 12582 } 12583 o2 := o3.Args[0] 12584 if o2.Op != OpAMD64ORQ { 12585 break 12586 } 12587 o1 := o2.Args[0] 12588 if o1.Op != OpAMD64ORQ { 12589 break 12590 } 12591 o0 := o1.Args[0] 12592 if o0.Op != OpAMD64ORQ { 12593 break 12594 } 12595 x0 := o0.Args[0] 12596 if x0.Op != OpAMD64MOVBloadidx1 { 12597 break 12598 } 12599 i := x0.AuxInt 12600 s := x0.Aux 12601 p := x0.Args[0] 12602 idx := x0.Args[1] 12603 mem := x0.Args[2] 12604 s0 := o0.Args[1] 12605 if s0.Op != OpAMD64SHLQconst { 12606 break 12607 } 12608 if s0.AuxInt != 8 { 12609 break 12610 } 12611 x1 := s0.Args[0] 12612 if x1.Op != OpAMD64MOVBloadidx1 { 12613 break 12614 } 12615 if x1.AuxInt != i-1 { 12616 break 12617 } 12618 if x1.Aux != s { 12619 break 12620 } 12621 if p != x1.Args[0] { 12622 break 12623 } 12624 if idx != x1.Args[1] { 12625 break 
12626 } 12627 if mem != x1.Args[2] { 12628 break 12629 } 12630 s1 := o1.Args[1] 12631 if s1.Op != OpAMD64SHLQconst { 12632 break 12633 } 12634 if s1.AuxInt != 16 { 12635 break 12636 } 12637 x2 := s1.Args[0] 12638 if x2.Op != OpAMD64MOVBloadidx1 { 12639 break 12640 } 12641 if x2.AuxInt != i-2 { 12642 break 12643 } 12644 if x2.Aux != s { 12645 break 12646 } 12647 if p != x2.Args[0] { 12648 break 12649 } 12650 if idx != x2.Args[1] { 12651 break 12652 } 12653 if mem != x2.Args[2] { 12654 break 12655 } 12656 s2 := o2.Args[1] 12657 if s2.Op != OpAMD64SHLQconst { 12658 break 12659 } 12660 if s2.AuxInt != 24 { 12661 break 12662 } 12663 x3 := s2.Args[0] 12664 if x3.Op != OpAMD64MOVBloadidx1 { 12665 break 12666 } 12667 if x3.AuxInt != i-3 { 12668 break 12669 } 12670 if x3.Aux != s { 12671 break 12672 } 12673 if p != x3.Args[0] { 12674 break 12675 } 12676 if idx != x3.Args[1] { 12677 break 12678 } 12679 if mem != x3.Args[2] { 12680 break 12681 } 12682 s3 := o3.Args[1] 12683 if s3.Op != OpAMD64SHLQconst { 12684 break 12685 } 12686 if s3.AuxInt != 32 { 12687 break 12688 } 12689 x4 := s3.Args[0] 12690 if x4.Op != OpAMD64MOVBloadidx1 { 12691 break 12692 } 12693 if x4.AuxInt != i-4 { 12694 break 12695 } 12696 if x4.Aux != s { 12697 break 12698 } 12699 if p != x4.Args[0] { 12700 break 12701 } 12702 if idx != x4.Args[1] { 12703 break 12704 } 12705 if mem != x4.Args[2] { 12706 break 12707 } 12708 s4 := o4.Args[1] 12709 if s4.Op != OpAMD64SHLQconst { 12710 break 12711 } 12712 if s4.AuxInt != 40 { 12713 break 12714 } 12715 x5 := s4.Args[0] 12716 if x5.Op != OpAMD64MOVBloadidx1 { 12717 break 12718 } 12719 if x5.AuxInt != i-5 { 12720 break 12721 } 12722 if x5.Aux != s { 12723 break 12724 } 12725 if p != x5.Args[0] { 12726 break 12727 } 12728 if idx != x5.Args[1] { 12729 break 12730 } 12731 if mem != x5.Args[2] { 12732 break 12733 } 12734 s5 := o5.Args[1] 12735 if s5.Op != OpAMD64SHLQconst { 12736 break 12737 } 12738 if s5.AuxInt != 48 { 12739 break 12740 } 12741 x6 := s5.Args[0] 12742 if x6.Op != OpAMD64MOVBloadidx1 { 12743 break 12744 } 12745 if x6.AuxInt != i-6 { 12746 break 12747 } 12748 if x6.Aux != s { 12749 break 12750 } 12751 if p != x6.Args[0] { 12752 break 12753 } 12754 if idx != x6.Args[1] { 12755 break 12756 } 12757 if mem != x6.Args[2] { 12758 break 12759 } 12760 s6 := v.Args[1] 12761 if s6.Op != OpAMD64SHLQconst { 12762 break 12763 } 12764 if s6.AuxInt != 56 { 12765 break 12766 } 12767 x7 := s6.Args[0] 12768 if x7.Op != OpAMD64MOVBloadidx1 { 12769 break 12770 } 12771 if x7.AuxInt != i-7 { 12772 break 12773 } 12774 if x7.Aux != s { 12775 break 12776 } 12777 if p != x7.Args[0] { 12778 break 12779 } 12780 if idx != x7.Args[1] { 12781 break 12782 } 12783 if mem != x7.Args[2] { 12784 break 12785 } 12786 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 12787 break 12788 } 12789 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 12790 v0 := 
b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 12791 v.reset(OpCopy) 12792 v.AddArg(v0) 12793 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, v.Type) 12794 v1.AuxInt = i - 7 12795 v1.Aux = s 12796 v1.AddArg(p) 12797 v1.AddArg(idx) 12798 v1.AddArg(mem) 12799 v0.AddArg(v1) 12800 return true 12801 } 12802 return false 12803 } 12804 func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { 12805 b := v.Block 12806 _ = b 12807 // match: (ORQconst [0] x) 12808 // cond: 12809 // result: x 12810 for { 12811 if v.AuxInt != 0 { 12812 break 12813 } 12814 x := v.Args[0] 12815 v.reset(OpCopy) 12816 v.Type = x.Type 12817 v.AddArg(x) 12818 return true 12819 } 12820 // match: (ORQconst [-1] _) 12821 // cond: 12822 // result: (MOVQconst [-1]) 12823 for { 12824 if v.AuxInt != -1 { 12825 break 12826 } 12827 v.reset(OpAMD64MOVQconst) 12828 v.AuxInt = -1 12829 return true 12830 } 12831 // match: (ORQconst [c] (MOVQconst [d])) 12832 // cond: 12833 // result: (MOVQconst [c|d]) 12834 for { 12835 c := v.AuxInt 12836 v_0 := v.Args[0] 12837 if v_0.Op != OpAMD64MOVQconst { 12838 break 12839 } 12840 d := v_0.AuxInt 12841 v.reset(OpAMD64MOVQconst) 12842 v.AuxInt = c | d 12843 return true 12844 } 12845 return false 12846 } 12847 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool { 12848 b := v.Block 12849 _ = b 12850 // match: (ROLBconst [c] (ROLBconst [d] x)) 12851 // cond: 12852 // result: (ROLBconst [(c+d)& 7] x) 12853 for { 12854 c := v.AuxInt 12855 v_0 := v.Args[0] 12856 if v_0.Op != OpAMD64ROLBconst { 12857 break 12858 } 12859 d := v_0.AuxInt 12860 x := v_0.Args[0] 12861 v.reset(OpAMD64ROLBconst) 12862 v.AuxInt = (c + d) & 7 12863 v.AddArg(x) 12864 return true 12865 } 12866 // match: (ROLBconst x [0]) 12867 // cond: 12868 // result: x 12869 for { 12870 if v.AuxInt != 0 { 12871 break 12872 } 12873 x := v.Args[0] 12874 v.reset(OpCopy) 12875 v.Type = x.Type 12876 v.AddArg(x) 12877 return true 12878 } 12879 return false 12880 } 12881 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool { 12882 b := v.Block 12883 _ = b 12884 // match: (ROLLconst [c] (ROLLconst [d] x)) 12885 // cond: 12886 // result: (ROLLconst [(c+d)&31] x) 12887 for { 12888 c := v.AuxInt 12889 v_0 := v.Args[0] 12890 if v_0.Op != OpAMD64ROLLconst { 12891 break 12892 } 12893 d := v_0.AuxInt 12894 x := v_0.Args[0] 12895 v.reset(OpAMD64ROLLconst) 12896 v.AuxInt = (c + d) & 31 12897 v.AddArg(x) 12898 return true 12899 } 12900 // match: (ROLLconst x [0]) 12901 // cond: 12902 // result: x 12903 for { 12904 if v.AuxInt != 0 { 12905 break 12906 } 12907 x := v.Args[0] 12908 v.reset(OpCopy) 12909 v.Type = x.Type 12910 v.AddArg(x) 12911 return true 12912 } 12913 return false 12914 } 12915 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool { 12916 b := v.Block 12917 _ = b 12918 // match: (ROLQconst [c] (ROLQconst [d] x)) 12919 // cond: 12920 // result: (ROLQconst [(c+d)&63] x) 12921 for { 12922 c := v.AuxInt 12923 v_0 := v.Args[0] 12924 if v_0.Op != OpAMD64ROLQconst { 12925 break 12926 } 12927 d := v_0.AuxInt 12928 x := v_0.Args[0] 12929 v.reset(OpAMD64ROLQconst) 12930 v.AuxInt = (c + d) & 63 12931 v.AddArg(x) 12932 return true 12933 } 12934 // match: (ROLQconst x [0]) 12935 // cond: 12936 // result: x 12937 for { 12938 if v.AuxInt != 0 { 12939 break 12940 } 12941 x := v.Args[0] 12942 v.reset(OpCopy) 12943 v.Type = x.Type 12944 v.AddArg(x) 12945 return true 12946 } 12947 return false 12948 } 12949 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool { 12950 b := v.Block 12951 _ = b 
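// The ROLWconst rules below mirror the byte, long and quad rotate rules above:
// two constant rotates of a 16-bit value compose into a single rotate whose
// count is reduced modulo the width, and a rotate by 0 is dropped entirely.
// A minimal sketch of the identity these rules rely on, using a hypothetical
// helper rolw (not part of this package):
//
//	func rolw(x uint16, k uint) uint16 {
//		k &= 15
//		return x<<k | x>>(16-k)
//	}
//
// for which rolw(rolw(x, d), c) == rolw(x, (c+d)&15) for any counts c and d.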
12952 // match: (ROLWconst [c] (ROLWconst [d] x)) 12953 // cond: 12954 // result: (ROLWconst [(c+d)&15] x) 12955 for { 12956 c := v.AuxInt 12957 v_0 := v.Args[0] 12958 if v_0.Op != OpAMD64ROLWconst { 12959 break 12960 } 12961 d := v_0.AuxInt 12962 x := v_0.Args[0] 12963 v.reset(OpAMD64ROLWconst) 12964 v.AuxInt = (c + d) & 15 12965 v.AddArg(x) 12966 return true 12967 } 12968 // match: (ROLWconst x [0]) 12969 // cond: 12970 // result: x 12971 for { 12972 if v.AuxInt != 0 { 12973 break 12974 } 12975 x := v.Args[0] 12976 v.reset(OpCopy) 12977 v.Type = x.Type 12978 v.AddArg(x) 12979 return true 12980 } 12981 return false 12982 } 12983 func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool { 12984 b := v.Block 12985 _ = b 12986 // match: (SARB x (MOVQconst [c])) 12987 // cond: 12988 // result: (SARBconst [min(c&31,7)] x) 12989 for { 12990 x := v.Args[0] 12991 v_1 := v.Args[1] 12992 if v_1.Op != OpAMD64MOVQconst { 12993 break 12994 } 12995 c := v_1.AuxInt 12996 v.reset(OpAMD64SARBconst) 12997 v.AuxInt = min(c&31, 7) 12998 v.AddArg(x) 12999 return true 13000 } 13001 // match: (SARB x (MOVLconst [c])) 13002 // cond: 13003 // result: (SARBconst [min(c&31,7)] x) 13004 for { 13005 x := v.Args[0] 13006 v_1 := v.Args[1] 13007 if v_1.Op != OpAMD64MOVLconst { 13008 break 13009 } 13010 c := v_1.AuxInt 13011 v.reset(OpAMD64SARBconst) 13012 v.AuxInt = min(c&31, 7) 13013 v.AddArg(x) 13014 return true 13015 } 13016 return false 13017 } 13018 func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { 13019 b := v.Block 13020 _ = b 13021 // match: (SARBconst x [0]) 13022 // cond: 13023 // result: x 13024 for { 13025 if v.AuxInt != 0 { 13026 break 13027 } 13028 x := v.Args[0] 13029 v.reset(OpCopy) 13030 v.Type = x.Type 13031 v.AddArg(x) 13032 return true 13033 } 13034 // match: (SARBconst [c] (MOVQconst [d])) 13035 // cond: 13036 // result: (MOVQconst [d>>uint64(c)]) 13037 for { 13038 c := v.AuxInt 13039 v_0 := v.Args[0] 13040 if v_0.Op != OpAMD64MOVQconst { 13041 break 13042 } 13043 d := v_0.AuxInt 13044 v.reset(OpAMD64MOVQconst) 13045 v.AuxInt = d >> uint64(c) 13046 return true 13047 } 13048 return false 13049 } 13050 func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool { 13051 b := v.Block 13052 _ = b 13053 // match: (SARL x (MOVQconst [c])) 13054 // cond: 13055 // result: (SARLconst [c&31] x) 13056 for { 13057 x := v.Args[0] 13058 v_1 := v.Args[1] 13059 if v_1.Op != OpAMD64MOVQconst { 13060 break 13061 } 13062 c := v_1.AuxInt 13063 v.reset(OpAMD64SARLconst) 13064 v.AuxInt = c & 31 13065 v.AddArg(x) 13066 return true 13067 } 13068 // match: (SARL x (MOVLconst [c])) 13069 // cond: 13070 // result: (SARLconst [c&31] x) 13071 for { 13072 x := v.Args[0] 13073 v_1 := v.Args[1] 13074 if v_1.Op != OpAMD64MOVLconst { 13075 break 13076 } 13077 c := v_1.AuxInt 13078 v.reset(OpAMD64SARLconst) 13079 v.AuxInt = c & 31 13080 v.AddArg(x) 13081 return true 13082 } 13083 // match: (SARL x (ANDLconst [31] y)) 13084 // cond: 13085 // result: (SARL x y) 13086 for { 13087 x := v.Args[0] 13088 v_1 := v.Args[1] 13089 if v_1.Op != OpAMD64ANDLconst { 13090 break 13091 } 13092 if v_1.AuxInt != 31 { 13093 break 13094 } 13095 y := v_1.Args[0] 13096 v.reset(OpAMD64SARL) 13097 v.AddArg(x) 13098 v.AddArg(y) 13099 return true 13100 } 13101 return false 13102 } 13103 func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool { 13104 b := v.Block 13105 _ = b 13106 // match: (SARLconst x [0]) 13107 // cond: 13108 // result: x 13109 for { 13110 if v.AuxInt != 0 { 13111 break 13112 } 13113 x := 
v.Args[0] 13114 v.reset(OpCopy) 13115 v.Type = x.Type 13116 v.AddArg(x) 13117 return true 13118 } 13119 // match: (SARLconst [c] (MOVQconst [d])) 13120 // cond: 13121 // result: (MOVQconst [d>>uint64(c)]) 13122 for { 13123 c := v.AuxInt 13124 v_0 := v.Args[0] 13125 if v_0.Op != OpAMD64MOVQconst { 13126 break 13127 } 13128 d := v_0.AuxInt 13129 v.reset(OpAMD64MOVQconst) 13130 v.AuxInt = d >> uint64(c) 13131 return true 13132 } 13133 return false 13134 } 13135 func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool { 13136 b := v.Block 13137 _ = b 13138 // match: (SARQ x (MOVQconst [c])) 13139 // cond: 13140 // result: (SARQconst [c&63] x) 13141 for { 13142 x := v.Args[0] 13143 v_1 := v.Args[1] 13144 if v_1.Op != OpAMD64MOVQconst { 13145 break 13146 } 13147 c := v_1.AuxInt 13148 v.reset(OpAMD64SARQconst) 13149 v.AuxInt = c & 63 13150 v.AddArg(x) 13151 return true 13152 } 13153 // match: (SARQ x (MOVLconst [c])) 13154 // cond: 13155 // result: (SARQconst [c&63] x) 13156 for { 13157 x := v.Args[0] 13158 v_1 := v.Args[1] 13159 if v_1.Op != OpAMD64MOVLconst { 13160 break 13161 } 13162 c := v_1.AuxInt 13163 v.reset(OpAMD64SARQconst) 13164 v.AuxInt = c & 63 13165 v.AddArg(x) 13166 return true 13167 } 13168 // match: (SARQ x (ANDQconst [63] y)) 13169 // cond: 13170 // result: (SARQ x y) 13171 for { 13172 x := v.Args[0] 13173 v_1 := v.Args[1] 13174 if v_1.Op != OpAMD64ANDQconst { 13175 break 13176 } 13177 if v_1.AuxInt != 63 { 13178 break 13179 } 13180 y := v_1.Args[0] 13181 v.reset(OpAMD64SARQ) 13182 v.AddArg(x) 13183 v.AddArg(y) 13184 return true 13185 } 13186 return false 13187 } 13188 func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool { 13189 b := v.Block 13190 _ = b 13191 // match: (SARQconst x [0]) 13192 // cond: 13193 // result: x 13194 for { 13195 if v.AuxInt != 0 { 13196 break 13197 } 13198 x := v.Args[0] 13199 v.reset(OpCopy) 13200 v.Type = x.Type 13201 v.AddArg(x) 13202 return true 13203 } 13204 // match: (SARQconst [c] (MOVQconst [d])) 13205 // cond: 13206 // result: (MOVQconst [d>>uint64(c)]) 13207 for { 13208 c := v.AuxInt 13209 v_0 := v.Args[0] 13210 if v_0.Op != OpAMD64MOVQconst { 13211 break 13212 } 13213 d := v_0.AuxInt 13214 v.reset(OpAMD64MOVQconst) 13215 v.AuxInt = d >> uint64(c) 13216 return true 13217 } 13218 return false 13219 } 13220 func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool { 13221 b := v.Block 13222 _ = b 13223 // match: (SARW x (MOVQconst [c])) 13224 // cond: 13225 // result: (SARWconst [min(c&31,15)] x) 13226 for { 13227 x := v.Args[0] 13228 v_1 := v.Args[1] 13229 if v_1.Op != OpAMD64MOVQconst { 13230 break 13231 } 13232 c := v_1.AuxInt 13233 v.reset(OpAMD64SARWconst) 13234 v.AuxInt = min(c&31, 15) 13235 v.AddArg(x) 13236 return true 13237 } 13238 // match: (SARW x (MOVLconst [c])) 13239 // cond: 13240 // result: (SARWconst [min(c&31,15)] x) 13241 for { 13242 x := v.Args[0] 13243 v_1 := v.Args[1] 13244 if v_1.Op != OpAMD64MOVLconst { 13245 break 13246 } 13247 c := v_1.AuxInt 13248 v.reset(OpAMD64SARWconst) 13249 v.AuxInt = min(c&31, 15) 13250 v.AddArg(x) 13251 return true 13252 } 13253 return false 13254 } 13255 func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool { 13256 b := v.Block 13257 _ = b 13258 // match: (SARWconst x [0]) 13259 // cond: 13260 // result: x 13261 for { 13262 if v.AuxInt != 0 { 13263 break 13264 } 13265 x := v.Args[0] 13266 v.reset(OpCopy) 13267 v.Type = x.Type 13268 v.AddArg(x) 13269 return true 13270 } 13271 // match: (SARWconst [c] (MOVQconst [d])) 13272 // cond: 13273 // 
result: (MOVQconst [d>>uint64(c)]) 13274 for { 13275 c := v.AuxInt 13276 v_0 := v.Args[0] 13277 if v_0.Op != OpAMD64MOVQconst { 13278 break 13279 } 13280 d := v_0.AuxInt 13281 v.reset(OpAMD64MOVQconst) 13282 v.AuxInt = d >> uint64(c) 13283 return true 13284 } 13285 return false 13286 } 13287 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool { 13288 b := v.Block 13289 _ = b 13290 // match: (SBBLcarrymask (FlagEQ)) 13291 // cond: 13292 // result: (MOVLconst [0]) 13293 for { 13294 v_0 := v.Args[0] 13295 if v_0.Op != OpAMD64FlagEQ { 13296 break 13297 } 13298 v.reset(OpAMD64MOVLconst) 13299 v.AuxInt = 0 13300 return true 13301 } 13302 // match: (SBBLcarrymask (FlagLT_ULT)) 13303 // cond: 13304 // result: (MOVLconst [-1]) 13305 for { 13306 v_0 := v.Args[0] 13307 if v_0.Op != OpAMD64FlagLT_ULT { 13308 break 13309 } 13310 v.reset(OpAMD64MOVLconst) 13311 v.AuxInt = -1 13312 return true 13313 } 13314 // match: (SBBLcarrymask (FlagLT_UGT)) 13315 // cond: 13316 // result: (MOVLconst [0]) 13317 for { 13318 v_0 := v.Args[0] 13319 if v_0.Op != OpAMD64FlagLT_UGT { 13320 break 13321 } 13322 v.reset(OpAMD64MOVLconst) 13323 v.AuxInt = 0 13324 return true 13325 } 13326 // match: (SBBLcarrymask (FlagGT_ULT)) 13327 // cond: 13328 // result: (MOVLconst [-1]) 13329 for { 13330 v_0 := v.Args[0] 13331 if v_0.Op != OpAMD64FlagGT_ULT { 13332 break 13333 } 13334 v.reset(OpAMD64MOVLconst) 13335 v.AuxInt = -1 13336 return true 13337 } 13338 // match: (SBBLcarrymask (FlagGT_UGT)) 13339 // cond: 13340 // result: (MOVLconst [0]) 13341 for { 13342 v_0 := v.Args[0] 13343 if v_0.Op != OpAMD64FlagGT_UGT { 13344 break 13345 } 13346 v.reset(OpAMD64MOVLconst) 13347 v.AuxInt = 0 13348 return true 13349 } 13350 return false 13351 } 13352 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool { 13353 b := v.Block 13354 _ = b 13355 // match: (SBBQcarrymask (FlagEQ)) 13356 // cond: 13357 // result: (MOVQconst [0]) 13358 for { 13359 v_0 := v.Args[0] 13360 if v_0.Op != OpAMD64FlagEQ { 13361 break 13362 } 13363 v.reset(OpAMD64MOVQconst) 13364 v.AuxInt = 0 13365 return true 13366 } 13367 // match: (SBBQcarrymask (FlagLT_ULT)) 13368 // cond: 13369 // result: (MOVQconst [-1]) 13370 for { 13371 v_0 := v.Args[0] 13372 if v_0.Op != OpAMD64FlagLT_ULT { 13373 break 13374 } 13375 v.reset(OpAMD64MOVQconst) 13376 v.AuxInt = -1 13377 return true 13378 } 13379 // match: (SBBQcarrymask (FlagLT_UGT)) 13380 // cond: 13381 // result: (MOVQconst [0]) 13382 for { 13383 v_0 := v.Args[0] 13384 if v_0.Op != OpAMD64FlagLT_UGT { 13385 break 13386 } 13387 v.reset(OpAMD64MOVQconst) 13388 v.AuxInt = 0 13389 return true 13390 } 13391 // match: (SBBQcarrymask (FlagGT_ULT)) 13392 // cond: 13393 // result: (MOVQconst [-1]) 13394 for { 13395 v_0 := v.Args[0] 13396 if v_0.Op != OpAMD64FlagGT_ULT { 13397 break 13398 } 13399 v.reset(OpAMD64MOVQconst) 13400 v.AuxInt = -1 13401 return true 13402 } 13403 // match: (SBBQcarrymask (FlagGT_UGT)) 13404 // cond: 13405 // result: (MOVQconst [0]) 13406 for { 13407 v_0 := v.Args[0] 13408 if v_0.Op != OpAMD64FlagGT_UGT { 13409 break 13410 } 13411 v.reset(OpAMD64MOVQconst) 13412 v.AuxInt = 0 13413 return true 13414 } 13415 return false 13416 } 13417 func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool { 13418 b := v.Block 13419 _ = b 13420 // match: (SETA (InvertFlags x)) 13421 // cond: 13422 // result: (SETB x) 13423 for { 13424 v_0 := v.Args[0] 13425 if v_0.Op != OpAMD64InvertFlags { 13426 break 13427 } 13428 x := v_0.Args[0] 13429 v.reset(OpAMD64SETB) 13430 v.AddArg(x) 13431 
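// InvertFlags records that the compare's operands were swapped, so each
// condition is replaced by its operand-swapped counterpart: "above" on the
// inverted flags is "below" on the original comparison, hence the SETB here.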
return true 13432 } 13433 // match: (SETA (FlagEQ)) 13434 // cond: 13435 // result: (MOVLconst [0]) 13436 for { 13437 v_0 := v.Args[0] 13438 if v_0.Op != OpAMD64FlagEQ { 13439 break 13440 } 13441 v.reset(OpAMD64MOVLconst) 13442 v.AuxInt = 0 13443 return true 13444 } 13445 // match: (SETA (FlagLT_ULT)) 13446 // cond: 13447 // result: (MOVLconst [0]) 13448 for { 13449 v_0 := v.Args[0] 13450 if v_0.Op != OpAMD64FlagLT_ULT { 13451 break 13452 } 13453 v.reset(OpAMD64MOVLconst) 13454 v.AuxInt = 0 13455 return true 13456 } 13457 // match: (SETA (FlagLT_UGT)) 13458 // cond: 13459 // result: (MOVLconst [1]) 13460 for { 13461 v_0 := v.Args[0] 13462 if v_0.Op != OpAMD64FlagLT_UGT { 13463 break 13464 } 13465 v.reset(OpAMD64MOVLconst) 13466 v.AuxInt = 1 13467 return true 13468 } 13469 // match: (SETA (FlagGT_ULT)) 13470 // cond: 13471 // result: (MOVLconst [0]) 13472 for { 13473 v_0 := v.Args[0] 13474 if v_0.Op != OpAMD64FlagGT_ULT { 13475 break 13476 } 13477 v.reset(OpAMD64MOVLconst) 13478 v.AuxInt = 0 13479 return true 13480 } 13481 // match: (SETA (FlagGT_UGT)) 13482 // cond: 13483 // result: (MOVLconst [1]) 13484 for { 13485 v_0 := v.Args[0] 13486 if v_0.Op != OpAMD64FlagGT_UGT { 13487 break 13488 } 13489 v.reset(OpAMD64MOVLconst) 13490 v.AuxInt = 1 13491 return true 13492 } 13493 return false 13494 } 13495 func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool { 13496 b := v.Block 13497 _ = b 13498 // match: (SETAE (InvertFlags x)) 13499 // cond: 13500 // result: (SETBE x) 13501 for { 13502 v_0 := v.Args[0] 13503 if v_0.Op != OpAMD64InvertFlags { 13504 break 13505 } 13506 x := v_0.Args[0] 13507 v.reset(OpAMD64SETBE) 13508 v.AddArg(x) 13509 return true 13510 } 13511 // match: (SETAE (FlagEQ)) 13512 // cond: 13513 // result: (MOVLconst [1]) 13514 for { 13515 v_0 := v.Args[0] 13516 if v_0.Op != OpAMD64FlagEQ { 13517 break 13518 } 13519 v.reset(OpAMD64MOVLconst) 13520 v.AuxInt = 1 13521 return true 13522 } 13523 // match: (SETAE (FlagLT_ULT)) 13524 // cond: 13525 // result: (MOVLconst [0]) 13526 for { 13527 v_0 := v.Args[0] 13528 if v_0.Op != OpAMD64FlagLT_ULT { 13529 break 13530 } 13531 v.reset(OpAMD64MOVLconst) 13532 v.AuxInt = 0 13533 return true 13534 } 13535 // match: (SETAE (FlagLT_UGT)) 13536 // cond: 13537 // result: (MOVLconst [1]) 13538 for { 13539 v_0 := v.Args[0] 13540 if v_0.Op != OpAMD64FlagLT_UGT { 13541 break 13542 } 13543 v.reset(OpAMD64MOVLconst) 13544 v.AuxInt = 1 13545 return true 13546 } 13547 // match: (SETAE (FlagGT_ULT)) 13548 // cond: 13549 // result: (MOVLconst [0]) 13550 for { 13551 v_0 := v.Args[0] 13552 if v_0.Op != OpAMD64FlagGT_ULT { 13553 break 13554 } 13555 v.reset(OpAMD64MOVLconst) 13556 v.AuxInt = 0 13557 return true 13558 } 13559 // match: (SETAE (FlagGT_UGT)) 13560 // cond: 13561 // result: (MOVLconst [1]) 13562 for { 13563 v_0 := v.Args[0] 13564 if v_0.Op != OpAMD64FlagGT_UGT { 13565 break 13566 } 13567 v.reset(OpAMD64MOVLconst) 13568 v.AuxInt = 1 13569 return true 13570 } 13571 return false 13572 } 13573 func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool { 13574 b := v.Block 13575 _ = b 13576 // match: (SETB (InvertFlags x)) 13577 // cond: 13578 // result: (SETA x) 13579 for { 13580 v_0 := v.Args[0] 13581 if v_0.Op != OpAMD64InvertFlags { 13582 break 13583 } 13584 x := v_0.Args[0] 13585 v.reset(OpAMD64SETA) 13586 v.AddArg(x) 13587 return true 13588 } 13589 // match: (SETB (FlagEQ)) 13590 // cond: 13591 // result: (MOVLconst [0]) 13592 for { 13593 v_0 := v.Args[0] 13594 if v_0.Op != OpAMD64FlagEQ { 13595 break 13596 } 13597 
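// The flags are statically known to come from an equal comparison, so the
// unsigned "below" condition can never hold and SETB folds to the constant 0.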
v.reset(OpAMD64MOVLconst) 13598 v.AuxInt = 0 13599 return true 13600 } 13601 // match: (SETB (FlagLT_ULT)) 13602 // cond: 13603 // result: (MOVLconst [1]) 13604 for { 13605 v_0 := v.Args[0] 13606 if v_0.Op != OpAMD64FlagLT_ULT { 13607 break 13608 } 13609 v.reset(OpAMD64MOVLconst) 13610 v.AuxInt = 1 13611 return true 13612 } 13613 // match: (SETB (FlagLT_UGT)) 13614 // cond: 13615 // result: (MOVLconst [0]) 13616 for { 13617 v_0 := v.Args[0] 13618 if v_0.Op != OpAMD64FlagLT_UGT { 13619 break 13620 } 13621 v.reset(OpAMD64MOVLconst) 13622 v.AuxInt = 0 13623 return true 13624 } 13625 // match: (SETB (FlagGT_ULT)) 13626 // cond: 13627 // result: (MOVLconst [1]) 13628 for { 13629 v_0 := v.Args[0] 13630 if v_0.Op != OpAMD64FlagGT_ULT { 13631 break 13632 } 13633 v.reset(OpAMD64MOVLconst) 13634 v.AuxInt = 1 13635 return true 13636 } 13637 // match: (SETB (FlagGT_UGT)) 13638 // cond: 13639 // result: (MOVLconst [0]) 13640 for { 13641 v_0 := v.Args[0] 13642 if v_0.Op != OpAMD64FlagGT_UGT { 13643 break 13644 } 13645 v.reset(OpAMD64MOVLconst) 13646 v.AuxInt = 0 13647 return true 13648 } 13649 return false 13650 } 13651 func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool { 13652 b := v.Block 13653 _ = b 13654 // match: (SETBE (InvertFlags x)) 13655 // cond: 13656 // result: (SETAE x) 13657 for { 13658 v_0 := v.Args[0] 13659 if v_0.Op != OpAMD64InvertFlags { 13660 break 13661 } 13662 x := v_0.Args[0] 13663 v.reset(OpAMD64SETAE) 13664 v.AddArg(x) 13665 return true 13666 } 13667 // match: (SETBE (FlagEQ)) 13668 // cond: 13669 // result: (MOVLconst [1]) 13670 for { 13671 v_0 := v.Args[0] 13672 if v_0.Op != OpAMD64FlagEQ { 13673 break 13674 } 13675 v.reset(OpAMD64MOVLconst) 13676 v.AuxInt = 1 13677 return true 13678 } 13679 // match: (SETBE (FlagLT_ULT)) 13680 // cond: 13681 // result: (MOVLconst [1]) 13682 for { 13683 v_0 := v.Args[0] 13684 if v_0.Op != OpAMD64FlagLT_ULT { 13685 break 13686 } 13687 v.reset(OpAMD64MOVLconst) 13688 v.AuxInt = 1 13689 return true 13690 } 13691 // match: (SETBE (FlagLT_UGT)) 13692 // cond: 13693 // result: (MOVLconst [0]) 13694 for { 13695 v_0 := v.Args[0] 13696 if v_0.Op != OpAMD64FlagLT_UGT { 13697 break 13698 } 13699 v.reset(OpAMD64MOVLconst) 13700 v.AuxInt = 0 13701 return true 13702 } 13703 // match: (SETBE (FlagGT_ULT)) 13704 // cond: 13705 // result: (MOVLconst [1]) 13706 for { 13707 v_0 := v.Args[0] 13708 if v_0.Op != OpAMD64FlagGT_ULT { 13709 break 13710 } 13711 v.reset(OpAMD64MOVLconst) 13712 v.AuxInt = 1 13713 return true 13714 } 13715 // match: (SETBE (FlagGT_UGT)) 13716 // cond: 13717 // result: (MOVLconst [0]) 13718 for { 13719 v_0 := v.Args[0] 13720 if v_0.Op != OpAMD64FlagGT_UGT { 13721 break 13722 } 13723 v.reset(OpAMD64MOVLconst) 13724 v.AuxInt = 0 13725 return true 13726 } 13727 return false 13728 } 13729 func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool { 13730 b := v.Block 13731 _ = b 13732 // match: (SETEQ (InvertFlags x)) 13733 // cond: 13734 // result: (SETEQ x) 13735 for { 13736 v_0 := v.Args[0] 13737 if v_0.Op != OpAMD64InvertFlags { 13738 break 13739 } 13740 x := v_0.Args[0] 13741 v.reset(OpAMD64SETEQ) 13742 v.AddArg(x) 13743 return true 13744 } 13745 // match: (SETEQ (FlagEQ)) 13746 // cond: 13747 // result: (MOVLconst [1]) 13748 for { 13749 v_0 := v.Args[0] 13750 if v_0.Op != OpAMD64FlagEQ { 13751 break 13752 } 13753 v.reset(OpAMD64MOVLconst) 13754 v.AuxInt = 1 13755 return true 13756 } 13757 // match: (SETEQ (FlagLT_ULT)) 13758 // cond: 13759 // result: (MOVLconst [0]) 13760 for { 13761 v_0 := v.Args[0] 13762 if 
v_0.Op != OpAMD64FlagLT_ULT { 13763 break 13764 } 13765 v.reset(OpAMD64MOVLconst) 13766 v.AuxInt = 0 13767 return true 13768 } 13769 // match: (SETEQ (FlagLT_UGT)) 13770 // cond: 13771 // result: (MOVLconst [0]) 13772 for { 13773 v_0 := v.Args[0] 13774 if v_0.Op != OpAMD64FlagLT_UGT { 13775 break 13776 } 13777 v.reset(OpAMD64MOVLconst) 13778 v.AuxInt = 0 13779 return true 13780 } 13781 // match: (SETEQ (FlagGT_ULT)) 13782 // cond: 13783 // result: (MOVLconst [0]) 13784 for { 13785 v_0 := v.Args[0] 13786 if v_0.Op != OpAMD64FlagGT_ULT { 13787 break 13788 } 13789 v.reset(OpAMD64MOVLconst) 13790 v.AuxInt = 0 13791 return true 13792 } 13793 // match: (SETEQ (FlagGT_UGT)) 13794 // cond: 13795 // result: (MOVLconst [0]) 13796 for { 13797 v_0 := v.Args[0] 13798 if v_0.Op != OpAMD64FlagGT_UGT { 13799 break 13800 } 13801 v.reset(OpAMD64MOVLconst) 13802 v.AuxInt = 0 13803 return true 13804 } 13805 return false 13806 } 13807 func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool { 13808 b := v.Block 13809 _ = b 13810 // match: (SETG (InvertFlags x)) 13811 // cond: 13812 // result: (SETL x) 13813 for { 13814 v_0 := v.Args[0] 13815 if v_0.Op != OpAMD64InvertFlags { 13816 break 13817 } 13818 x := v_0.Args[0] 13819 v.reset(OpAMD64SETL) 13820 v.AddArg(x) 13821 return true 13822 } 13823 // match: (SETG (FlagEQ)) 13824 // cond: 13825 // result: (MOVLconst [0]) 13826 for { 13827 v_0 := v.Args[0] 13828 if v_0.Op != OpAMD64FlagEQ { 13829 break 13830 } 13831 v.reset(OpAMD64MOVLconst) 13832 v.AuxInt = 0 13833 return true 13834 } 13835 // match: (SETG (FlagLT_ULT)) 13836 // cond: 13837 // result: (MOVLconst [0]) 13838 for { 13839 v_0 := v.Args[0] 13840 if v_0.Op != OpAMD64FlagLT_ULT { 13841 break 13842 } 13843 v.reset(OpAMD64MOVLconst) 13844 v.AuxInt = 0 13845 return true 13846 } 13847 // match: (SETG (FlagLT_UGT)) 13848 // cond: 13849 // result: (MOVLconst [0]) 13850 for { 13851 v_0 := v.Args[0] 13852 if v_0.Op != OpAMD64FlagLT_UGT { 13853 break 13854 } 13855 v.reset(OpAMD64MOVLconst) 13856 v.AuxInt = 0 13857 return true 13858 } 13859 // match: (SETG (FlagGT_ULT)) 13860 // cond: 13861 // result: (MOVLconst [1]) 13862 for { 13863 v_0 := v.Args[0] 13864 if v_0.Op != OpAMD64FlagGT_ULT { 13865 break 13866 } 13867 v.reset(OpAMD64MOVLconst) 13868 v.AuxInt = 1 13869 return true 13870 } 13871 // match: (SETG (FlagGT_UGT)) 13872 // cond: 13873 // result: (MOVLconst [1]) 13874 for { 13875 v_0 := v.Args[0] 13876 if v_0.Op != OpAMD64FlagGT_UGT { 13877 break 13878 } 13879 v.reset(OpAMD64MOVLconst) 13880 v.AuxInt = 1 13881 return true 13882 } 13883 return false 13884 } 13885 func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool { 13886 b := v.Block 13887 _ = b 13888 // match: (SETGE (InvertFlags x)) 13889 // cond: 13890 // result: (SETLE x) 13891 for { 13892 v_0 := v.Args[0] 13893 if v_0.Op != OpAMD64InvertFlags { 13894 break 13895 } 13896 x := v_0.Args[0] 13897 v.reset(OpAMD64SETLE) 13898 v.AddArg(x) 13899 return true 13900 } 13901 // match: (SETGE (FlagEQ)) 13902 // cond: 13903 // result: (MOVLconst [1]) 13904 for { 13905 v_0 := v.Args[0] 13906 if v_0.Op != OpAMD64FlagEQ { 13907 break 13908 } 13909 v.reset(OpAMD64MOVLconst) 13910 v.AuxInt = 1 13911 return true 13912 } 13913 // match: (SETGE (FlagLT_ULT)) 13914 // cond: 13915 // result: (MOVLconst [0]) 13916 for { 13917 v_0 := v.Args[0] 13918 if v_0.Op != OpAMD64FlagLT_ULT { 13919 break 13920 } 13921 v.reset(OpAMD64MOVLconst) 13922 v.AuxInt = 0 13923 return true 13924 } 13925 // match: (SETGE (FlagLT_UGT)) 13926 // cond: 13927 // result: (MOVLconst 
[0]) 13928 for { 13929 v_0 := v.Args[0] 13930 if v_0.Op != OpAMD64FlagLT_UGT { 13931 break 13932 } 13933 v.reset(OpAMD64MOVLconst) 13934 v.AuxInt = 0 13935 return true 13936 } 13937 // match: (SETGE (FlagGT_ULT)) 13938 // cond: 13939 // result: (MOVLconst [1]) 13940 for { 13941 v_0 := v.Args[0] 13942 if v_0.Op != OpAMD64FlagGT_ULT { 13943 break 13944 } 13945 v.reset(OpAMD64MOVLconst) 13946 v.AuxInt = 1 13947 return true 13948 } 13949 // match: (SETGE (FlagGT_UGT)) 13950 // cond: 13951 // result: (MOVLconst [1]) 13952 for { 13953 v_0 := v.Args[0] 13954 if v_0.Op != OpAMD64FlagGT_UGT { 13955 break 13956 } 13957 v.reset(OpAMD64MOVLconst) 13958 v.AuxInt = 1 13959 return true 13960 } 13961 return false 13962 } 13963 func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool { 13964 b := v.Block 13965 _ = b 13966 // match: (SETL (InvertFlags x)) 13967 // cond: 13968 // result: (SETG x) 13969 for { 13970 v_0 := v.Args[0] 13971 if v_0.Op != OpAMD64InvertFlags { 13972 break 13973 } 13974 x := v_0.Args[0] 13975 v.reset(OpAMD64SETG) 13976 v.AddArg(x) 13977 return true 13978 } 13979 // match: (SETL (FlagEQ)) 13980 // cond: 13981 // result: (MOVLconst [0]) 13982 for { 13983 v_0 := v.Args[0] 13984 if v_0.Op != OpAMD64FlagEQ { 13985 break 13986 } 13987 v.reset(OpAMD64MOVLconst) 13988 v.AuxInt = 0 13989 return true 13990 } 13991 // match: (SETL (FlagLT_ULT)) 13992 // cond: 13993 // result: (MOVLconst [1]) 13994 for { 13995 v_0 := v.Args[0] 13996 if v_0.Op != OpAMD64FlagLT_ULT { 13997 break 13998 } 13999 v.reset(OpAMD64MOVLconst) 14000 v.AuxInt = 1 14001 return true 14002 } 14003 // match: (SETL (FlagLT_UGT)) 14004 // cond: 14005 // result: (MOVLconst [1]) 14006 for { 14007 v_0 := v.Args[0] 14008 if v_0.Op != OpAMD64FlagLT_UGT { 14009 break 14010 } 14011 v.reset(OpAMD64MOVLconst) 14012 v.AuxInt = 1 14013 return true 14014 } 14015 // match: (SETL (FlagGT_ULT)) 14016 // cond: 14017 // result: (MOVLconst [0]) 14018 for { 14019 v_0 := v.Args[0] 14020 if v_0.Op != OpAMD64FlagGT_ULT { 14021 break 14022 } 14023 v.reset(OpAMD64MOVLconst) 14024 v.AuxInt = 0 14025 return true 14026 } 14027 // match: (SETL (FlagGT_UGT)) 14028 // cond: 14029 // result: (MOVLconst [0]) 14030 for { 14031 v_0 := v.Args[0] 14032 if v_0.Op != OpAMD64FlagGT_UGT { 14033 break 14034 } 14035 v.reset(OpAMD64MOVLconst) 14036 v.AuxInt = 0 14037 return true 14038 } 14039 return false 14040 } 14041 func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool { 14042 b := v.Block 14043 _ = b 14044 // match: (SETLE (InvertFlags x)) 14045 // cond: 14046 // result: (SETGE x) 14047 for { 14048 v_0 := v.Args[0] 14049 if v_0.Op != OpAMD64InvertFlags { 14050 break 14051 } 14052 x := v_0.Args[0] 14053 v.reset(OpAMD64SETGE) 14054 v.AddArg(x) 14055 return true 14056 } 14057 // match: (SETLE (FlagEQ)) 14058 // cond: 14059 // result: (MOVLconst [1]) 14060 for { 14061 v_0 := v.Args[0] 14062 if v_0.Op != OpAMD64FlagEQ { 14063 break 14064 } 14065 v.reset(OpAMD64MOVLconst) 14066 v.AuxInt = 1 14067 return true 14068 } 14069 // match: (SETLE (FlagLT_ULT)) 14070 // cond: 14071 // result: (MOVLconst [1]) 14072 for { 14073 v_0 := v.Args[0] 14074 if v_0.Op != OpAMD64FlagLT_ULT { 14075 break 14076 } 14077 v.reset(OpAMD64MOVLconst) 14078 v.AuxInt = 1 14079 return true 14080 } 14081 // match: (SETLE (FlagLT_UGT)) 14082 // cond: 14083 // result: (MOVLconst [1]) 14084 for { 14085 v_0 := v.Args[0] 14086 if v_0.Op != OpAMD64FlagLT_UGT { 14087 break 14088 } 14089 v.reset(OpAMD64MOVLconst) 14090 v.AuxInt = 1 14091 return true 14092 } 14093 // match: (SETLE 
(FlagGT_ULT)) 14094 // cond: 14095 // result: (MOVLconst [0]) 14096 for { 14097 v_0 := v.Args[0] 14098 if v_0.Op != OpAMD64FlagGT_ULT { 14099 break 14100 } 14101 v.reset(OpAMD64MOVLconst) 14102 v.AuxInt = 0 14103 return true 14104 } 14105 // match: (SETLE (FlagGT_UGT)) 14106 // cond: 14107 // result: (MOVLconst [0]) 14108 for { 14109 v_0 := v.Args[0] 14110 if v_0.Op != OpAMD64FlagGT_UGT { 14111 break 14112 } 14113 v.reset(OpAMD64MOVLconst) 14114 v.AuxInt = 0 14115 return true 14116 } 14117 return false 14118 } 14119 func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool { 14120 b := v.Block 14121 _ = b 14122 // match: (SETNE (InvertFlags x)) 14123 // cond: 14124 // result: (SETNE x) 14125 for { 14126 v_0 := v.Args[0] 14127 if v_0.Op != OpAMD64InvertFlags { 14128 break 14129 } 14130 x := v_0.Args[0] 14131 v.reset(OpAMD64SETNE) 14132 v.AddArg(x) 14133 return true 14134 } 14135 // match: (SETNE (FlagEQ)) 14136 // cond: 14137 // result: (MOVLconst [0]) 14138 for { 14139 v_0 := v.Args[0] 14140 if v_0.Op != OpAMD64FlagEQ { 14141 break 14142 } 14143 v.reset(OpAMD64MOVLconst) 14144 v.AuxInt = 0 14145 return true 14146 } 14147 // match: (SETNE (FlagLT_ULT)) 14148 // cond: 14149 // result: (MOVLconst [1]) 14150 for { 14151 v_0 := v.Args[0] 14152 if v_0.Op != OpAMD64FlagLT_ULT { 14153 break 14154 } 14155 v.reset(OpAMD64MOVLconst) 14156 v.AuxInt = 1 14157 return true 14158 } 14159 // match: (SETNE (FlagLT_UGT)) 14160 // cond: 14161 // result: (MOVLconst [1]) 14162 for { 14163 v_0 := v.Args[0] 14164 if v_0.Op != OpAMD64FlagLT_UGT { 14165 break 14166 } 14167 v.reset(OpAMD64MOVLconst) 14168 v.AuxInt = 1 14169 return true 14170 } 14171 // match: (SETNE (FlagGT_ULT)) 14172 // cond: 14173 // result: (MOVLconst [1]) 14174 for { 14175 v_0 := v.Args[0] 14176 if v_0.Op != OpAMD64FlagGT_ULT { 14177 break 14178 } 14179 v.reset(OpAMD64MOVLconst) 14180 v.AuxInt = 1 14181 return true 14182 } 14183 // match: (SETNE (FlagGT_UGT)) 14184 // cond: 14185 // result: (MOVLconst [1]) 14186 for { 14187 v_0 := v.Args[0] 14188 if v_0.Op != OpAMD64FlagGT_UGT { 14189 break 14190 } 14191 v.reset(OpAMD64MOVLconst) 14192 v.AuxInt = 1 14193 return true 14194 } 14195 return false 14196 } 14197 func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { 14198 b := v.Block 14199 _ = b 14200 // match: (SHLL x (MOVQconst [c])) 14201 // cond: 14202 // result: (SHLLconst [c&31] x) 14203 for { 14204 x := v.Args[0] 14205 v_1 := v.Args[1] 14206 if v_1.Op != OpAMD64MOVQconst { 14207 break 14208 } 14209 c := v_1.AuxInt 14210 v.reset(OpAMD64SHLLconst) 14211 v.AuxInt = c & 31 14212 v.AddArg(x) 14213 return true 14214 } 14215 // match: (SHLL x (MOVLconst [c])) 14216 // cond: 14217 // result: (SHLLconst [c&31] x) 14218 for { 14219 x := v.Args[0] 14220 v_1 := v.Args[1] 14221 if v_1.Op != OpAMD64MOVLconst { 14222 break 14223 } 14224 c := v_1.AuxInt 14225 v.reset(OpAMD64SHLLconst) 14226 v.AuxInt = c & 31 14227 v.AddArg(x) 14228 return true 14229 } 14230 // match: (SHLL x (ANDLconst [31] y)) 14231 // cond: 14232 // result: (SHLL x y) 14233 for { 14234 x := v.Args[0] 14235 v_1 := v.Args[1] 14236 if v_1.Op != OpAMD64ANDLconst { 14237 break 14238 } 14239 if v_1.AuxInt != 31 { 14240 break 14241 } 14242 y := v_1.Args[0] 14243 v.reset(OpAMD64SHLL) 14244 v.AddArg(x) 14245 v.AddArg(y) 14246 return true 14247 } 14248 return false 14249 } 14250 func rewriteValueAMD64_OpAMD64SHLLconst(v *Value, config *Config) bool { 14251 b := v.Block 14252 _ = b 14253 // match: (SHLLconst x [0]) 14254 // cond: 14255 // result: x 14256 for { 14257 if v.AuxInt 
!= 0 { 14258 break 14259 } 14260 x := v.Args[0] 14261 v.reset(OpCopy) 14262 v.Type = x.Type 14263 v.AddArg(x) 14264 return true 14265 } 14266 return false 14267 } 14268 func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { 14269 b := v.Block 14270 _ = b 14271 // match: (SHLQ x (MOVQconst [c])) 14272 // cond: 14273 // result: (SHLQconst [c&63] x) 14274 for { 14275 x := v.Args[0] 14276 v_1 := v.Args[1] 14277 if v_1.Op != OpAMD64MOVQconst { 14278 break 14279 } 14280 c := v_1.AuxInt 14281 v.reset(OpAMD64SHLQconst) 14282 v.AuxInt = c & 63 14283 v.AddArg(x) 14284 return true 14285 } 14286 // match: (SHLQ x (MOVLconst [c])) 14287 // cond: 14288 // result: (SHLQconst [c&63] x) 14289 for { 14290 x := v.Args[0] 14291 v_1 := v.Args[1] 14292 if v_1.Op != OpAMD64MOVLconst { 14293 break 14294 } 14295 c := v_1.AuxInt 14296 v.reset(OpAMD64SHLQconst) 14297 v.AuxInt = c & 63 14298 v.AddArg(x) 14299 return true 14300 } 14301 // match: (SHLQ x (ANDQconst [63] y)) 14302 // cond: 14303 // result: (SHLQ x y) 14304 for { 14305 x := v.Args[0] 14306 v_1 := v.Args[1] 14307 if v_1.Op != OpAMD64ANDQconst { 14308 break 14309 } 14310 if v_1.AuxInt != 63 { 14311 break 14312 } 14313 y := v_1.Args[0] 14314 v.reset(OpAMD64SHLQ) 14315 v.AddArg(x) 14316 v.AddArg(y) 14317 return true 14318 } 14319 return false 14320 } 14321 func rewriteValueAMD64_OpAMD64SHLQconst(v *Value, config *Config) bool { 14322 b := v.Block 14323 _ = b 14324 // match: (SHLQconst x [0]) 14325 // cond: 14326 // result: x 14327 for { 14328 if v.AuxInt != 0 { 14329 break 14330 } 14331 x := v.Args[0] 14332 v.reset(OpCopy) 14333 v.Type = x.Type 14334 v.AddArg(x) 14335 return true 14336 } 14337 return false 14338 } 14339 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { 14340 b := v.Block 14341 _ = b 14342 // match: (SHRB x (MOVQconst [c])) 14343 // cond: c&31 < 8 14344 // result: (SHRBconst [c&31] x) 14345 for { 14346 x := v.Args[0] 14347 v_1 := v.Args[1] 14348 if v_1.Op != OpAMD64MOVQconst { 14349 break 14350 } 14351 c := v_1.AuxInt 14352 if !(c&31 < 8) { 14353 break 14354 } 14355 v.reset(OpAMD64SHRBconst) 14356 v.AuxInt = c & 31 14357 v.AddArg(x) 14358 return true 14359 } 14360 // match: (SHRB x (MOVLconst [c])) 14361 // cond: c&31 < 8 14362 // result: (SHRBconst [c&31] x) 14363 for { 14364 x := v.Args[0] 14365 v_1 := v.Args[1] 14366 if v_1.Op != OpAMD64MOVLconst { 14367 break 14368 } 14369 c := v_1.AuxInt 14370 if !(c&31 < 8) { 14371 break 14372 } 14373 v.reset(OpAMD64SHRBconst) 14374 v.AuxInt = c & 31 14375 v.AddArg(x) 14376 return true 14377 } 14378 // match: (SHRB _ (MOVQconst [c])) 14379 // cond: c&31 >= 8 14380 // result: (MOVLconst [0]) 14381 for { 14382 v_1 := v.Args[1] 14383 if v_1.Op != OpAMD64MOVQconst { 14384 break 14385 } 14386 c := v_1.AuxInt 14387 if !(c&31 >= 8) { 14388 break 14389 } 14390 v.reset(OpAMD64MOVLconst) 14391 v.AuxInt = 0 14392 return true 14393 } 14394 // match: (SHRB _ (MOVLconst [c])) 14395 // cond: c&31 >= 8 14396 // result: (MOVLconst [0]) 14397 for { 14398 v_1 := v.Args[1] 14399 if v_1.Op != OpAMD64MOVLconst { 14400 break 14401 } 14402 c := v_1.AuxInt 14403 if !(c&31 >= 8) { 14404 break 14405 } 14406 v.reset(OpAMD64MOVLconst) 14407 v.AuxInt = 0 14408 return true 14409 } 14410 return false 14411 } 14412 func rewriteValueAMD64_OpAMD64SHRBconst(v *Value, config *Config) bool { 14413 b := v.Block 14414 _ = b 14415 // match: (SHRBconst x [0]) 14416 // cond: 14417 // result: x 14418 for { 14419 if v.AuxInt != 0 { 14420 break 14421 } 14422 x := v.Args[0] 14423 v.reset(OpCopy) 14424 v.Type = x.Type 
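// A shift by a constant 0 is a no-op, so the SHRBconst is rewritten into a
// plain Copy of its argument, preserving x's original type.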
14425 v.AddArg(x) 14426 return true 14427 } 14428 return false 14429 } 14430 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { 14431 b := v.Block 14432 _ = b 14433 // match: (SHRL x (MOVQconst [c])) 14434 // cond: 14435 // result: (SHRLconst [c&31] x) 14436 for { 14437 x := v.Args[0] 14438 v_1 := v.Args[1] 14439 if v_1.Op != OpAMD64MOVQconst { 14440 break 14441 } 14442 c := v_1.AuxInt 14443 v.reset(OpAMD64SHRLconst) 14444 v.AuxInt = c & 31 14445 v.AddArg(x) 14446 return true 14447 } 14448 // match: (SHRL x (MOVLconst [c])) 14449 // cond: 14450 // result: (SHRLconst [c&31] x) 14451 for { 14452 x := v.Args[0] 14453 v_1 := v.Args[1] 14454 if v_1.Op != OpAMD64MOVLconst { 14455 break 14456 } 14457 c := v_1.AuxInt 14458 v.reset(OpAMD64SHRLconst) 14459 v.AuxInt = c & 31 14460 v.AddArg(x) 14461 return true 14462 } 14463 // match: (SHRL x (ANDLconst [31] y)) 14464 // cond: 14465 // result: (SHRL x y) 14466 for { 14467 x := v.Args[0] 14468 v_1 := v.Args[1] 14469 if v_1.Op != OpAMD64ANDLconst { 14470 break 14471 } 14472 if v_1.AuxInt != 31 { 14473 break 14474 } 14475 y := v_1.Args[0] 14476 v.reset(OpAMD64SHRL) 14477 v.AddArg(x) 14478 v.AddArg(y) 14479 return true 14480 } 14481 return false 14482 } 14483 func rewriteValueAMD64_OpAMD64SHRLconst(v *Value, config *Config) bool { 14484 b := v.Block 14485 _ = b 14486 // match: (SHRLconst x [0]) 14487 // cond: 14488 // result: x 14489 for { 14490 if v.AuxInt != 0 { 14491 break 14492 } 14493 x := v.Args[0] 14494 v.reset(OpCopy) 14495 v.Type = x.Type 14496 v.AddArg(x) 14497 return true 14498 } 14499 return false 14500 } 14501 func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { 14502 b := v.Block 14503 _ = b 14504 // match: (SHRQ x (MOVQconst [c])) 14505 // cond: 14506 // result: (SHRQconst [c&63] x) 14507 for { 14508 x := v.Args[0] 14509 v_1 := v.Args[1] 14510 if v_1.Op != OpAMD64MOVQconst { 14511 break 14512 } 14513 c := v_1.AuxInt 14514 v.reset(OpAMD64SHRQconst) 14515 v.AuxInt = c & 63 14516 v.AddArg(x) 14517 return true 14518 } 14519 // match: (SHRQ x (MOVLconst [c])) 14520 // cond: 14521 // result: (SHRQconst [c&63] x) 14522 for { 14523 x := v.Args[0] 14524 v_1 := v.Args[1] 14525 if v_1.Op != OpAMD64MOVLconst { 14526 break 14527 } 14528 c := v_1.AuxInt 14529 v.reset(OpAMD64SHRQconst) 14530 v.AuxInt = c & 63 14531 v.AddArg(x) 14532 return true 14533 } 14534 // match: (SHRQ x (ANDQconst [63] y)) 14535 // cond: 14536 // result: (SHRQ x y) 14537 for { 14538 x := v.Args[0] 14539 v_1 := v.Args[1] 14540 if v_1.Op != OpAMD64ANDQconst { 14541 break 14542 } 14543 if v_1.AuxInt != 63 { 14544 break 14545 } 14546 y := v_1.Args[0] 14547 v.reset(OpAMD64SHRQ) 14548 v.AddArg(x) 14549 v.AddArg(y) 14550 return true 14551 } 14552 return false 14553 } 14554 func rewriteValueAMD64_OpAMD64SHRQconst(v *Value, config *Config) bool { 14555 b := v.Block 14556 _ = b 14557 // match: (SHRQconst x [0]) 14558 // cond: 14559 // result: x 14560 for { 14561 if v.AuxInt != 0 { 14562 break 14563 } 14564 x := v.Args[0] 14565 v.reset(OpCopy) 14566 v.Type = x.Type 14567 v.AddArg(x) 14568 return true 14569 } 14570 return false 14571 } 14572 func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { 14573 b := v.Block 14574 _ = b 14575 // match: (SHRW x (MOVQconst [c])) 14576 // cond: c&31 < 16 14577 // result: (SHRWconst [c&31] x) 14578 for { 14579 x := v.Args[0] 14580 v_1 := v.Args[1] 14581 if v_1.Op != OpAMD64MOVQconst { 14582 break 14583 } 14584 c := v_1.AuxInt 14585 if !(c&31 < 16) { 14586 break 14587 } 14588 v.reset(OpAMD64SHRWconst) 14589 v.AuxInt 
= c & 31 14590 v.AddArg(x) 14591 return true 14592 } 14593 // match: (SHRW x (MOVLconst [c])) 14594 // cond: c&31 < 16 14595 // result: (SHRWconst [c&31] x) 14596 for { 14597 x := v.Args[0] 14598 v_1 := v.Args[1] 14599 if v_1.Op != OpAMD64MOVLconst { 14600 break 14601 } 14602 c := v_1.AuxInt 14603 if !(c&31 < 16) { 14604 break 14605 } 14606 v.reset(OpAMD64SHRWconst) 14607 v.AuxInt = c & 31 14608 v.AddArg(x) 14609 return true 14610 } 14611 // match: (SHRW _ (MOVQconst [c])) 14612 // cond: c&31 >= 16 14613 // result: (MOVLconst [0]) 14614 for { 14615 v_1 := v.Args[1] 14616 if v_1.Op != OpAMD64MOVQconst { 14617 break 14618 } 14619 c := v_1.AuxInt 14620 if !(c&31 >= 16) { 14621 break 14622 } 14623 v.reset(OpAMD64MOVLconst) 14624 v.AuxInt = 0 14625 return true 14626 } 14627 // match: (SHRW _ (MOVLconst [c])) 14628 // cond: c&31 >= 16 14629 // result: (MOVLconst [0]) 14630 for { 14631 v_1 := v.Args[1] 14632 if v_1.Op != OpAMD64MOVLconst { 14633 break 14634 } 14635 c := v_1.AuxInt 14636 if !(c&31 >= 16) { 14637 break 14638 } 14639 v.reset(OpAMD64MOVLconst) 14640 v.AuxInt = 0 14641 return true 14642 } 14643 return false 14644 } 14645 func rewriteValueAMD64_OpAMD64SHRWconst(v *Value, config *Config) bool { 14646 b := v.Block 14647 _ = b 14648 // match: (SHRWconst x [0]) 14649 // cond: 14650 // result: x 14651 for { 14652 if v.AuxInt != 0 { 14653 break 14654 } 14655 x := v.Args[0] 14656 v.reset(OpCopy) 14657 v.Type = x.Type 14658 v.AddArg(x) 14659 return true 14660 } 14661 return false 14662 } 14663 func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool { 14664 b := v.Block 14665 _ = b 14666 // match: (SUBL x (MOVLconst [c])) 14667 // cond: 14668 // result: (SUBLconst x [c]) 14669 for { 14670 x := v.Args[0] 14671 v_1 := v.Args[1] 14672 if v_1.Op != OpAMD64MOVLconst { 14673 break 14674 } 14675 c := v_1.AuxInt 14676 v.reset(OpAMD64SUBLconst) 14677 v.AuxInt = c 14678 v.AddArg(x) 14679 return true 14680 } 14681 // match: (SUBL (MOVLconst [c]) x) 14682 // cond: 14683 // result: (NEGL (SUBLconst <v.Type> x [c])) 14684 for { 14685 v_0 := v.Args[0] 14686 if v_0.Op != OpAMD64MOVLconst { 14687 break 14688 } 14689 c := v_0.AuxInt 14690 x := v.Args[1] 14691 v.reset(OpAMD64NEGL) 14692 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 14693 v0.AuxInt = c 14694 v0.AddArg(x) 14695 v.AddArg(v0) 14696 return true 14697 } 14698 // match: (SUBL x x) 14699 // cond: 14700 // result: (MOVLconst [0]) 14701 for { 14702 x := v.Args[0] 14703 if x != v.Args[1] { 14704 break 14705 } 14706 v.reset(OpAMD64MOVLconst) 14707 v.AuxInt = 0 14708 return true 14709 } 14710 return false 14711 } 14712 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool { 14713 b := v.Block 14714 _ = b 14715 // match: (SUBLconst [c] x) 14716 // cond: int32(c) == 0 14717 // result: x 14718 for { 14719 c := v.AuxInt 14720 x := v.Args[0] 14721 if !(int32(c) == 0) { 14722 break 14723 } 14724 v.reset(OpCopy) 14725 v.Type = x.Type 14726 v.AddArg(x) 14727 return true 14728 } 14729 // match: (SUBLconst [c] x) 14730 // cond: 14731 // result: (ADDLconst [int64(int32(-c))] x) 14732 for { 14733 c := v.AuxInt 14734 x := v.Args[0] 14735 v.reset(OpAMD64ADDLconst) 14736 v.AuxInt = int64(int32(-c)) 14737 v.AddArg(x) 14738 return true 14739 } 14740 } 14741 func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool { 14742 b := v.Block 14743 _ = b 14744 // match: (SUBQ x (MOVQconst [c])) 14745 // cond: is32Bit(c) 14746 // result: (SUBQconst x [c]) 14747 for { 14748 x := v.Args[0] 14749 v_1 := v.Args[1] 14750 if v_1.Op != OpAMD64MOVQconst 
{ 14751 break 14752 } 14753 c := v_1.AuxInt 14754 if !(is32Bit(c)) { 14755 break 14756 } 14757 v.reset(OpAMD64SUBQconst) 14758 v.AuxInt = c 14759 v.AddArg(x) 14760 return true 14761 } 14762 // match: (SUBQ (MOVQconst [c]) x) 14763 // cond: is32Bit(c) 14764 // result: (NEGQ (SUBQconst <v.Type> x [c])) 14765 for { 14766 v_0 := v.Args[0] 14767 if v_0.Op != OpAMD64MOVQconst { 14768 break 14769 } 14770 c := v_0.AuxInt 14771 x := v.Args[1] 14772 if !(is32Bit(c)) { 14773 break 14774 } 14775 v.reset(OpAMD64NEGQ) 14776 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 14777 v0.AuxInt = c 14778 v0.AddArg(x) 14779 v.AddArg(v0) 14780 return true 14781 } 14782 // match: (SUBQ x x) 14783 // cond: 14784 // result: (MOVQconst [0]) 14785 for { 14786 x := v.Args[0] 14787 if x != v.Args[1] { 14788 break 14789 } 14790 v.reset(OpAMD64MOVQconst) 14791 v.AuxInt = 0 14792 return true 14793 } 14794 return false 14795 } 14796 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { 14797 b := v.Block 14798 _ = b 14799 // match: (SUBQconst [0] x) 14800 // cond: 14801 // result: x 14802 for { 14803 if v.AuxInt != 0 { 14804 break 14805 } 14806 x := v.Args[0] 14807 v.reset(OpCopy) 14808 v.Type = x.Type 14809 v.AddArg(x) 14810 return true 14811 } 14812 // match: (SUBQconst [c] x) 14813 // cond: c != -(1<<31) 14814 // result: (ADDQconst [-c] x) 14815 for { 14816 c := v.AuxInt 14817 x := v.Args[0] 14818 if !(c != -(1 << 31)) { 14819 break 14820 } 14821 v.reset(OpAMD64ADDQconst) 14822 v.AuxInt = -c 14823 v.AddArg(x) 14824 return true 14825 } 14826 // match: (SUBQconst (MOVQconst [d]) [c]) 14827 // cond: 14828 // result: (MOVQconst [d-c]) 14829 for { 14830 c := v.AuxInt 14831 v_0 := v.Args[0] 14832 if v_0.Op != OpAMD64MOVQconst { 14833 break 14834 } 14835 d := v_0.AuxInt 14836 v.reset(OpAMD64MOVQconst) 14837 v.AuxInt = d - c 14838 return true 14839 } 14840 // match: (SUBQconst (SUBQconst x [d]) [c]) 14841 // cond: is32Bit(-c-d) 14842 // result: (ADDQconst [-c-d] x) 14843 for { 14844 c := v.AuxInt 14845 v_0 := v.Args[0] 14846 if v_0.Op != OpAMD64SUBQconst { 14847 break 14848 } 14849 d := v_0.AuxInt 14850 x := v_0.Args[0] 14851 if !(is32Bit(-c - d)) { 14852 break 14853 } 14854 v.reset(OpAMD64ADDQconst) 14855 v.AuxInt = -c - d 14856 v.AddArg(x) 14857 return true 14858 } 14859 return false 14860 } 14861 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value, config *Config) bool { 14862 b := v.Block 14863 _ = b 14864 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 14865 // cond: is32Bit(off1+off2) 14866 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 14867 for { 14868 off1 := v.AuxInt 14869 sym := v.Aux 14870 val := v.Args[0] 14871 v_1 := v.Args[1] 14872 if v_1.Op != OpAMD64ADDQconst { 14873 break 14874 } 14875 off2 := v_1.AuxInt 14876 ptr := v_1.Args[0] 14877 mem := v.Args[2] 14878 if !(is32Bit(off1 + off2)) { 14879 break 14880 } 14881 v.reset(OpAMD64XADDLlock) 14882 v.AuxInt = off1 + off2 14883 v.Aux = sym 14884 v.AddArg(val) 14885 v.AddArg(ptr) 14886 v.AddArg(mem) 14887 return true 14888 } 14889 return false 14890 } 14891 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value, config *Config) bool { 14892 b := v.Block 14893 _ = b 14894 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 14895 // cond: is32Bit(off1+off2) 14896 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 14897 for { 14898 off1 := v.AuxInt 14899 sym := v.Aux 14900 val := v.Args[0] 14901 v_1 := v.Args[1] 14902 if v_1.Op != OpAMD64ADDQconst { 14903 break 14904 } 14905 off2 := v_1.AuxInt 14906 ptr := v_1.Args[0] 
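// The constant pointer adjustment is folded into the instruction's own
// displacement; the is32Bit check below guarantees the merged offset still
// fits in the signed 32-bit immediate of the addressing mode.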
14907 mem := v.Args[2] 14908 if !(is32Bit(off1 + off2)) { 14909 break 14910 } 14911 v.reset(OpAMD64XADDQlock) 14912 v.AuxInt = off1 + off2 14913 v.Aux = sym 14914 v.AddArg(val) 14915 v.AddArg(ptr) 14916 v.AddArg(mem) 14917 return true 14918 } 14919 return false 14920 } 14921 func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool { 14922 b := v.Block 14923 _ = b 14924 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 14925 // cond: is32Bit(off1+off2) 14926 // result: (XCHGL [off1+off2] {sym} val ptr mem) 14927 for { 14928 off1 := v.AuxInt 14929 sym := v.Aux 14930 val := v.Args[0] 14931 v_1 := v.Args[1] 14932 if v_1.Op != OpAMD64ADDQconst { 14933 break 14934 } 14935 off2 := v_1.AuxInt 14936 ptr := v_1.Args[0] 14937 mem := v.Args[2] 14938 if !(is32Bit(off1 + off2)) { 14939 break 14940 } 14941 v.reset(OpAMD64XCHGL) 14942 v.AuxInt = off1 + off2 14943 v.Aux = sym 14944 v.AddArg(val) 14945 v.AddArg(ptr) 14946 v.AddArg(mem) 14947 return true 14948 } 14949 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 14950 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 14951 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 14952 for { 14953 off1 := v.AuxInt 14954 sym1 := v.Aux 14955 val := v.Args[0] 14956 v_1 := v.Args[1] 14957 if v_1.Op != OpAMD64LEAQ { 14958 break 14959 } 14960 off2 := v_1.AuxInt 14961 sym2 := v_1.Aux 14962 ptr := v_1.Args[0] 14963 mem := v.Args[2] 14964 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 14965 break 14966 } 14967 v.reset(OpAMD64XCHGL) 14968 v.AuxInt = off1 + off2 14969 v.Aux = mergeSym(sym1, sym2) 14970 v.AddArg(val) 14971 v.AddArg(ptr) 14972 v.AddArg(mem) 14973 return true 14974 } 14975 return false 14976 } 14977 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool { 14978 b := v.Block 14979 _ = b 14980 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 14981 // cond: is32Bit(off1+off2) 14982 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 14983 for { 14984 off1 := v.AuxInt 14985 sym := v.Aux 14986 val := v.Args[0] 14987 v_1 := v.Args[1] 14988 if v_1.Op != OpAMD64ADDQconst { 14989 break 14990 } 14991 off2 := v_1.AuxInt 14992 ptr := v_1.Args[0] 14993 mem := v.Args[2] 14994 if !(is32Bit(off1 + off2)) { 14995 break 14996 } 14997 v.reset(OpAMD64XCHGQ) 14998 v.AuxInt = off1 + off2 14999 v.Aux = sym 15000 v.AddArg(val) 15001 v.AddArg(ptr) 15002 v.AddArg(mem) 15003 return true 15004 } 15005 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 15006 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 15007 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 15008 for { 15009 off1 := v.AuxInt 15010 sym1 := v.Aux 15011 val := v.Args[0] 15012 v_1 := v.Args[1] 15013 if v_1.Op != OpAMD64LEAQ { 15014 break 15015 } 15016 off2 := v_1.AuxInt 15017 sym2 := v_1.Aux 15018 ptr := v_1.Args[0] 15019 mem := v.Args[2] 15020 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 15021 break 15022 } 15023 v.reset(OpAMD64XCHGQ) 15024 v.AuxInt = off1 + off2 15025 v.Aux = mergeSym(sym1, sym2) 15026 v.AddArg(val) 15027 v.AddArg(ptr) 15028 v.AddArg(mem) 15029 return true 15030 } 15031 return false 15032 } 15033 func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool { 15034 b := v.Block 15035 _ = b 15036 // match: (XORL x (MOVLconst [c])) 15037 // cond: 15038 // result: (XORLconst [c] x) 15039 for { 15040 x := v.Args[0] 15041 v_1 := v.Args[1] 15042 if v_1.Op != OpAMD64MOVLconst { 15043 break 15044 } 15045 c 
:= v_1.AuxInt 15046 v.reset(OpAMD64XORLconst) 15047 v.AuxInt = c 15048 v.AddArg(x) 15049 return true 15050 } 15051 // match: (XORL (MOVLconst [c]) x) 15052 // cond: 15053 // result: (XORLconst [c] x) 15054 for { 15055 v_0 := v.Args[0] 15056 if v_0.Op != OpAMD64MOVLconst { 15057 break 15058 } 15059 c := v_0.AuxInt 15060 x := v.Args[1] 15061 v.reset(OpAMD64XORLconst) 15062 v.AuxInt = c 15063 v.AddArg(x) 15064 return true 15065 } 15066 // match: (XORL (SHLLconst x [c]) (SHRLconst x [32-c])) 15067 // cond: 15068 // result: (ROLLconst x [ c]) 15069 for { 15070 v_0 := v.Args[0] 15071 if v_0.Op != OpAMD64SHLLconst { 15072 break 15073 } 15074 c := v_0.AuxInt 15075 x := v_0.Args[0] 15076 v_1 := v.Args[1] 15077 if v_1.Op != OpAMD64SHRLconst { 15078 break 15079 } 15080 if v_1.AuxInt != 32-c { 15081 break 15082 } 15083 if x != v_1.Args[0] { 15084 break 15085 } 15086 v.reset(OpAMD64ROLLconst) 15087 v.AuxInt = c 15088 v.AddArg(x) 15089 return true 15090 } 15091 // match: (XORL (SHRLconst x [c]) (SHLLconst x [32-c])) 15092 // cond: 15093 // result: (ROLLconst x [32-c]) 15094 for { 15095 v_0 := v.Args[0] 15096 if v_0.Op != OpAMD64SHRLconst { 15097 break 15098 } 15099 c := v_0.AuxInt 15100 x := v_0.Args[0] 15101 v_1 := v.Args[1] 15102 if v_1.Op != OpAMD64SHLLconst { 15103 break 15104 } 15105 if v_1.AuxInt != 32-c { 15106 break 15107 } 15108 if x != v_1.Args[0] { 15109 break 15110 } 15111 v.reset(OpAMD64ROLLconst) 15112 v.AuxInt = 32 - c 15113 v.AddArg(x) 15114 return true 15115 } 15116 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) 15117 // cond: c < 16 && t.Size() == 2 15118 // result: (ROLWconst x [ c]) 15119 for { 15120 t := v.Type 15121 v_0 := v.Args[0] 15122 if v_0.Op != OpAMD64SHLLconst { 15123 break 15124 } 15125 c := v_0.AuxInt 15126 x := v_0.Args[0] 15127 v_1 := v.Args[1] 15128 if v_1.Op != OpAMD64SHRWconst { 15129 break 15130 } 15131 if v_1.AuxInt != 16-c { 15132 break 15133 } 15134 if x != v_1.Args[0] { 15135 break 15136 } 15137 if !(c < 16 && t.Size() == 2) { 15138 break 15139 } 15140 v.reset(OpAMD64ROLWconst) 15141 v.AuxInt = c 15142 v.AddArg(x) 15143 return true 15144 } 15145 // match: (XORL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) 15146 // cond: c > 0 && t.Size() == 2 15147 // result: (ROLWconst x [16-c]) 15148 for { 15149 t := v.Type 15150 v_0 := v.Args[0] 15151 if v_0.Op != OpAMD64SHRWconst { 15152 break 15153 } 15154 c := v_0.AuxInt 15155 x := v_0.Args[0] 15156 v_1 := v.Args[1] 15157 if v_1.Op != OpAMD64SHLLconst { 15158 break 15159 } 15160 if v_1.AuxInt != 16-c { 15161 break 15162 } 15163 if x != v_1.Args[0] { 15164 break 15165 } 15166 if !(c > 0 && t.Size() == 2) { 15167 break 15168 } 15169 v.reset(OpAMD64ROLWconst) 15170 v.AuxInt = 16 - c 15171 v.AddArg(x) 15172 return true 15173 } 15174 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) 15175 // cond: c < 8 && t.Size() == 1 15176 // result: (ROLBconst x [ c]) 15177 for { 15178 t := v.Type 15179 v_0 := v.Args[0] 15180 if v_0.Op != OpAMD64SHLLconst { 15181 break 15182 } 15183 c := v_0.AuxInt 15184 x := v_0.Args[0] 15185 v_1 := v.Args[1] 15186 if v_1.Op != OpAMD64SHRBconst { 15187 break 15188 } 15189 if v_1.AuxInt != 8-c { 15190 break 15191 } 15192 if x != v_1.Args[0] { 15193 break 15194 } 15195 if !(c < 8 && t.Size() == 1) { 15196 break 15197 } 15198 v.reset(OpAMD64ROLBconst) 15199 v.AuxInt = c 15200 v.AddArg(x) 15201 return true 15202 } 15203 // match: (XORL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) 15204 // cond: c > 0 && t.Size() == 1 15205 // result: (ROLBconst x [ 8-c]) 15206 for { 15207 t := v.Type 15208 
v_0 := v.Args[0] 15209 if v_0.Op != OpAMD64SHRBconst { 15210 break 15211 } 15212 c := v_0.AuxInt 15213 x := v_0.Args[0] 15214 v_1 := v.Args[1] 15215 if v_1.Op != OpAMD64SHLLconst { 15216 break 15217 } 15218 if v_1.AuxInt != 8-c { 15219 break 15220 } 15221 if x != v_1.Args[0] { 15222 break 15223 } 15224 if !(c > 0 && t.Size() == 1) { 15225 break 15226 } 15227 v.reset(OpAMD64ROLBconst) 15228 v.AuxInt = 8 - c 15229 v.AddArg(x) 15230 return true 15231 } 15232 // match: (XORL x x) 15233 // cond: 15234 // result: (MOVLconst [0]) 15235 for { 15236 x := v.Args[0] 15237 if x != v.Args[1] { 15238 break 15239 } 15240 v.reset(OpAMD64MOVLconst) 15241 v.AuxInt = 0 15242 return true 15243 } 15244 return false 15245 } 15246 func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool { 15247 b := v.Block 15248 _ = b 15249 // match: (XORLconst [c] (XORLconst [d] x)) 15250 // cond: 15251 // result: (XORLconst [c ^ d] x) 15252 for { 15253 c := v.AuxInt 15254 v_0 := v.Args[0] 15255 if v_0.Op != OpAMD64XORLconst { 15256 break 15257 } 15258 d := v_0.AuxInt 15259 x := v_0.Args[0] 15260 v.reset(OpAMD64XORLconst) 15261 v.AuxInt = c ^ d 15262 v.AddArg(x) 15263 return true 15264 } 15265 // match: (XORLconst [c] x) 15266 // cond: int32(c)==0 15267 // result: x 15268 for { 15269 c := v.AuxInt 15270 x := v.Args[0] 15271 if !(int32(c) == 0) { 15272 break 15273 } 15274 v.reset(OpCopy) 15275 v.Type = x.Type 15276 v.AddArg(x) 15277 return true 15278 } 15279 // match: (XORLconst [c] (MOVLconst [d])) 15280 // cond: 15281 // result: (MOVLconst [c^d]) 15282 for { 15283 c := v.AuxInt 15284 v_0 := v.Args[0] 15285 if v_0.Op != OpAMD64MOVLconst { 15286 break 15287 } 15288 d := v_0.AuxInt 15289 v.reset(OpAMD64MOVLconst) 15290 v.AuxInt = c ^ d 15291 return true 15292 } 15293 return false 15294 } 15295 func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool { 15296 b := v.Block 15297 _ = b 15298 // match: (XORQ x (MOVQconst [c])) 15299 // cond: is32Bit(c) 15300 // result: (XORQconst [c] x) 15301 for { 15302 x := v.Args[0] 15303 v_1 := v.Args[1] 15304 if v_1.Op != OpAMD64MOVQconst { 15305 break 15306 } 15307 c := v_1.AuxInt 15308 if !(is32Bit(c)) { 15309 break 15310 } 15311 v.reset(OpAMD64XORQconst) 15312 v.AuxInt = c 15313 v.AddArg(x) 15314 return true 15315 } 15316 // match: (XORQ (MOVQconst [c]) x) 15317 // cond: is32Bit(c) 15318 // result: (XORQconst [c] x) 15319 for { 15320 v_0 := v.Args[0] 15321 if v_0.Op != OpAMD64MOVQconst { 15322 break 15323 } 15324 c := v_0.AuxInt 15325 x := v.Args[1] 15326 if !(is32Bit(c)) { 15327 break 15328 } 15329 v.reset(OpAMD64XORQconst) 15330 v.AuxInt = c 15331 v.AddArg(x) 15332 return true 15333 } 15334 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [64-c])) 15335 // cond: 15336 // result: (ROLQconst x [ c]) 15337 for { 15338 v_0 := v.Args[0] 15339 if v_0.Op != OpAMD64SHLQconst { 15340 break 15341 } 15342 c := v_0.AuxInt 15343 x := v_0.Args[0] 15344 v_1 := v.Args[1] 15345 if v_1.Op != OpAMD64SHRQconst { 15346 break 15347 } 15348 if v_1.AuxInt != 64-c { 15349 break 15350 } 15351 if x != v_1.Args[0] { 15352 break 15353 } 15354 v.reset(OpAMD64ROLQconst) 15355 v.AuxInt = c 15356 v.AddArg(x) 15357 return true 15358 } 15359 // match: (XORQ (SHRQconst x [c]) (SHLQconst x [64-c])) 15360 // cond: 15361 // result: (ROLQconst x [64-c]) 15362 for { 15363 v_0 := v.Args[0] 15364 if v_0.Op != OpAMD64SHRQconst { 15365 break 15366 } 15367 c := v_0.AuxInt 15368 x := v_0.Args[0] 15369 v_1 := v.Args[1] 15370 if v_1.Op != OpAMD64SHLQconst { 15371 break 15372 } 15373 if v_1.AuxInt != 64-c { 15374 break 
15375 } 15376 if x != v_1.Args[0] { 15377 break 15378 } 15379 v.reset(OpAMD64ROLQconst) 15380 v.AuxInt = 64 - c 15381 v.AddArg(x) 15382 return true 15383 } 15384 // match: (XORQ x x) 15385 // cond: 15386 // result: (MOVQconst [0]) 15387 for { 15388 x := v.Args[0] 15389 if x != v.Args[1] { 15390 break 15391 } 15392 v.reset(OpAMD64MOVQconst) 15393 v.AuxInt = 0 15394 return true 15395 } 15396 return false 15397 } 15398 func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool { 15399 b := v.Block 15400 _ = b 15401 // match: (XORQconst [c] (XORQconst [d] x)) 15402 // cond: 15403 // result: (XORQconst [c ^ d] x) 15404 for { 15405 c := v.AuxInt 15406 v_0 := v.Args[0] 15407 if v_0.Op != OpAMD64XORQconst { 15408 break 15409 } 15410 d := v_0.AuxInt 15411 x := v_0.Args[0] 15412 v.reset(OpAMD64XORQconst) 15413 v.AuxInt = c ^ d 15414 v.AddArg(x) 15415 return true 15416 } 15417 // match: (XORQconst [0] x) 15418 // cond: 15419 // result: x 15420 for { 15421 if v.AuxInt != 0 { 15422 break 15423 } 15424 x := v.Args[0] 15425 v.reset(OpCopy) 15426 v.Type = x.Type 15427 v.AddArg(x) 15428 return true 15429 } 15430 // match: (XORQconst [c] (MOVQconst [d])) 15431 // cond: 15432 // result: (MOVQconst [c^d]) 15433 for { 15434 c := v.AuxInt 15435 v_0 := v.Args[0] 15436 if v_0.Op != OpAMD64MOVQconst { 15437 break 15438 } 15439 d := v_0.AuxInt 15440 v.reset(OpAMD64MOVQconst) 15441 v.AuxInt = c ^ d 15442 return true 15443 } 15444 return false 15445 } 15446 func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { 15447 b := v.Block 15448 _ = b 15449 // match: (Add16 x y) 15450 // cond: 15451 // result: (ADDL x y) 15452 for { 15453 x := v.Args[0] 15454 y := v.Args[1] 15455 v.reset(OpAMD64ADDL) 15456 v.AddArg(x) 15457 v.AddArg(y) 15458 return true 15459 } 15460 } 15461 func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool { 15462 b := v.Block 15463 _ = b 15464 // match: (Add32 x y) 15465 // cond: 15466 // result: (ADDL x y) 15467 for { 15468 x := v.Args[0] 15469 y := v.Args[1] 15470 v.reset(OpAMD64ADDL) 15471 v.AddArg(x) 15472 v.AddArg(y) 15473 return true 15474 } 15475 } 15476 func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool { 15477 b := v.Block 15478 _ = b 15479 // match: (Add32F x y) 15480 // cond: 15481 // result: (ADDSS x y) 15482 for { 15483 x := v.Args[0] 15484 y := v.Args[1] 15485 v.reset(OpAMD64ADDSS) 15486 v.AddArg(x) 15487 v.AddArg(y) 15488 return true 15489 } 15490 } 15491 func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool { 15492 b := v.Block 15493 _ = b 15494 // match: (Add64 x y) 15495 // cond: 15496 // result: (ADDQ x y) 15497 for { 15498 x := v.Args[0] 15499 y := v.Args[1] 15500 v.reset(OpAMD64ADDQ) 15501 v.AddArg(x) 15502 v.AddArg(y) 15503 return true 15504 } 15505 } 15506 func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool { 15507 b := v.Block 15508 _ = b 15509 // match: (Add64F x y) 15510 // cond: 15511 // result: (ADDSD x y) 15512 for { 15513 x := v.Args[0] 15514 y := v.Args[1] 15515 v.reset(OpAMD64ADDSD) 15516 v.AddArg(x) 15517 v.AddArg(y) 15518 return true 15519 } 15520 } 15521 func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool { 15522 b := v.Block 15523 _ = b 15524 // match: (Add8 x y) 15525 // cond: 15526 // result: (ADDL x y) 15527 for { 15528 x := v.Args[0] 15529 y := v.Args[1] 15530 v.reset(OpAMD64ADDL) 15531 v.AddArg(x) 15532 v.AddArg(y) 15533 return true 15534 } 15535 } 15536 func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool { 15537 b := v.Block 15538 _ = b 15539 // match: (AddPtr x y) 15540 // cond: 
config.PtrSize == 8 15541 // result: (ADDQ x y) 15542 for { 15543 x := v.Args[0] 15544 y := v.Args[1] 15545 if !(config.PtrSize == 8) { 15546 break 15547 } 15548 v.reset(OpAMD64ADDQ) 15549 v.AddArg(x) 15550 v.AddArg(y) 15551 return true 15552 } 15553 // match: (AddPtr x y) 15554 // cond: config.PtrSize == 4 15555 // result: (ADDL x y) 15556 for { 15557 x := v.Args[0] 15558 y := v.Args[1] 15559 if !(config.PtrSize == 4) { 15560 break 15561 } 15562 v.reset(OpAMD64ADDL) 15563 v.AddArg(x) 15564 v.AddArg(y) 15565 return true 15566 } 15567 return false 15568 } 15569 func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool { 15570 b := v.Block 15571 _ = b 15572 // match: (Addr {sym} base) 15573 // cond: config.PtrSize == 8 15574 // result: (LEAQ {sym} base) 15575 for { 15576 sym := v.Aux 15577 base := v.Args[0] 15578 if !(config.PtrSize == 8) { 15579 break 15580 } 15581 v.reset(OpAMD64LEAQ) 15582 v.Aux = sym 15583 v.AddArg(base) 15584 return true 15585 } 15586 // match: (Addr {sym} base) 15587 // cond: config.PtrSize == 4 15588 // result: (LEAL {sym} base) 15589 for { 15590 sym := v.Aux 15591 base := v.Args[0] 15592 if !(config.PtrSize == 4) { 15593 break 15594 } 15595 v.reset(OpAMD64LEAL) 15596 v.Aux = sym 15597 v.AddArg(base) 15598 return true 15599 } 15600 return false 15601 } 15602 func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool { 15603 b := v.Block 15604 _ = b 15605 // match: (And16 x y) 15606 // cond: 15607 // result: (ANDL x y) 15608 for { 15609 x := v.Args[0] 15610 y := v.Args[1] 15611 v.reset(OpAMD64ANDL) 15612 v.AddArg(x) 15613 v.AddArg(y) 15614 return true 15615 } 15616 } 15617 func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool { 15618 b := v.Block 15619 _ = b 15620 // match: (And32 x y) 15621 // cond: 15622 // result: (ANDL x y) 15623 for { 15624 x := v.Args[0] 15625 y := v.Args[1] 15626 v.reset(OpAMD64ANDL) 15627 v.AddArg(x) 15628 v.AddArg(y) 15629 return true 15630 } 15631 } 15632 func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool { 15633 b := v.Block 15634 _ = b 15635 // match: (And64 x y) 15636 // cond: 15637 // result: (ANDQ x y) 15638 for { 15639 x := v.Args[0] 15640 y := v.Args[1] 15641 v.reset(OpAMD64ANDQ) 15642 v.AddArg(x) 15643 v.AddArg(y) 15644 return true 15645 } 15646 } 15647 func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { 15648 b := v.Block 15649 _ = b 15650 // match: (And8 x y) 15651 // cond: 15652 // result: (ANDL x y) 15653 for { 15654 x := v.Args[0] 15655 y := v.Args[1] 15656 v.reset(OpAMD64ANDL) 15657 v.AddArg(x) 15658 v.AddArg(y) 15659 return true 15660 } 15661 } 15662 func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool { 15663 b := v.Block 15664 _ = b 15665 // match: (AndB x y) 15666 // cond: 15667 // result: (ANDL x y) 15668 for { 15669 x := v.Args[0] 15670 y := v.Args[1] 15671 v.reset(OpAMD64ANDL) 15672 v.AddArg(x) 15673 v.AddArg(y) 15674 return true 15675 } 15676 } 15677 func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool { 15678 b := v.Block 15679 _ = b 15680 // match: (AtomicAdd32 ptr val mem) 15681 // cond: 15682 // result: (AddTupleFirst32 (XADDLlock val ptr mem) val) 15683 for { 15684 ptr := v.Args[0] 15685 val := v.Args[1] 15686 mem := v.Args[2] 15687 v.reset(OpAMD64AddTupleFirst32) 15688 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem)) 15689 v0.AddArg(val) 15690 v0.AddArg(ptr) 15691 v0.AddArg(mem) 15692 v.AddArg(v0) 15693 v.AddArg(val) 15694 return true 15695 } 15696 } 15697 func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) 
bool { 15698 b := v.Block 15699 _ = b 15700 // match: (AtomicAdd64 ptr val mem) 15701 // cond: 15702 // result: (AddTupleFirst64 (XADDQlock val ptr mem) val) 15703 for { 15704 ptr := v.Args[0] 15705 val := v.Args[1] 15706 mem := v.Args[2] 15707 v.reset(OpAMD64AddTupleFirst64) 15708 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem)) 15709 v0.AddArg(val) 15710 v0.AddArg(ptr) 15711 v0.AddArg(mem) 15712 v.AddArg(v0) 15713 v.AddArg(val) 15714 return true 15715 } 15716 } 15717 func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool { 15718 b := v.Block 15719 _ = b 15720 // match: (AtomicAnd8 ptr val mem) 15721 // cond: 15722 // result: (ANDBlock ptr val mem) 15723 for { 15724 ptr := v.Args[0] 15725 val := v.Args[1] 15726 mem := v.Args[2] 15727 v.reset(OpAMD64ANDBlock) 15728 v.AddArg(ptr) 15729 v.AddArg(val) 15730 v.AddArg(mem) 15731 return true 15732 } 15733 } 15734 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool { 15735 b := v.Block 15736 _ = b 15737 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 15738 // cond: 15739 // result: (CMPXCHGLlock ptr old new_ mem) 15740 for { 15741 ptr := v.Args[0] 15742 old := v.Args[1] 15743 new_ := v.Args[2] 15744 mem := v.Args[3] 15745 v.reset(OpAMD64CMPXCHGLlock) 15746 v.AddArg(ptr) 15747 v.AddArg(old) 15748 v.AddArg(new_) 15749 v.AddArg(mem) 15750 return true 15751 } 15752 } 15753 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool { 15754 b := v.Block 15755 _ = b 15756 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 15757 // cond: 15758 // result: (CMPXCHGQlock ptr old new_ mem) 15759 for { 15760 ptr := v.Args[0] 15761 old := v.Args[1] 15762 new_ := v.Args[2] 15763 mem := v.Args[3] 15764 v.reset(OpAMD64CMPXCHGQlock) 15765 v.AddArg(ptr) 15766 v.AddArg(old) 15767 v.AddArg(new_) 15768 v.AddArg(mem) 15769 return true 15770 } 15771 } 15772 func rewriteValueAMD64_OpAtomicExchange32(v *Value, config *Config) bool { 15773 b := v.Block 15774 _ = b 15775 // match: (AtomicExchange32 ptr val mem) 15776 // cond: 15777 // result: (XCHGL val ptr mem) 15778 for { 15779 ptr := v.Args[0] 15780 val := v.Args[1] 15781 mem := v.Args[2] 15782 v.reset(OpAMD64XCHGL) 15783 v.AddArg(val) 15784 v.AddArg(ptr) 15785 v.AddArg(mem) 15786 return true 15787 } 15788 } 15789 func rewriteValueAMD64_OpAtomicExchange64(v *Value, config *Config) bool { 15790 b := v.Block 15791 _ = b 15792 // match: (AtomicExchange64 ptr val mem) 15793 // cond: 15794 // result: (XCHGQ val ptr mem) 15795 for { 15796 ptr := v.Args[0] 15797 val := v.Args[1] 15798 mem := v.Args[2] 15799 v.reset(OpAMD64XCHGQ) 15800 v.AddArg(val) 15801 v.AddArg(ptr) 15802 v.AddArg(mem) 15803 return true 15804 } 15805 } 15806 func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool { 15807 b := v.Block 15808 _ = b 15809 // match: (AtomicLoad32 ptr mem) 15810 // cond: 15811 // result: (MOVLatomicload ptr mem) 15812 for { 15813 ptr := v.Args[0] 15814 mem := v.Args[1] 15815 v.reset(OpAMD64MOVLatomicload) 15816 v.AddArg(ptr) 15817 v.AddArg(mem) 15818 return true 15819 } 15820 } 15821 func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool { 15822 b := v.Block 15823 _ = b 15824 // match: (AtomicLoad64 ptr mem) 15825 // cond: 15826 // result: (MOVQatomicload ptr mem) 15827 for { 15828 ptr := v.Args[0] 15829 mem := v.Args[1] 15830 v.reset(OpAMD64MOVQatomicload) 15831 v.AddArg(ptr) 15832 v.AddArg(mem) 15833 return true 15834 } 15835 } 15836 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool 
{ 15837 b := v.Block 15838 _ = b 15839 // match: (AtomicLoadPtr ptr mem) 15840 // cond: config.PtrSize == 8 15841 // result: (MOVQatomicload ptr mem) 15842 for { 15843 ptr := v.Args[0] 15844 mem := v.Args[1] 15845 if !(config.PtrSize == 8) { 15846 break 15847 } 15848 v.reset(OpAMD64MOVQatomicload) 15849 v.AddArg(ptr) 15850 v.AddArg(mem) 15851 return true 15852 } 15853 // match: (AtomicLoadPtr ptr mem) 15854 // cond: config.PtrSize == 4 15855 // result: (MOVLatomicload ptr mem) 15856 for { 15857 ptr := v.Args[0] 15858 mem := v.Args[1] 15859 if !(config.PtrSize == 4) { 15860 break 15861 } 15862 v.reset(OpAMD64MOVLatomicload) 15863 v.AddArg(ptr) 15864 v.AddArg(mem) 15865 return true 15866 } 15867 return false 15868 } 15869 func rewriteValueAMD64_OpAtomicOr8(v *Value, config *Config) bool { 15870 b := v.Block 15871 _ = b 15872 // match: (AtomicOr8 ptr val mem) 15873 // cond: 15874 // result: (ORBlock ptr val mem) 15875 for { 15876 ptr := v.Args[0] 15877 val := v.Args[1] 15878 mem := v.Args[2] 15879 v.reset(OpAMD64ORBlock) 15880 v.AddArg(ptr) 15881 v.AddArg(val) 15882 v.AddArg(mem) 15883 return true 15884 } 15885 } 15886 func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool { 15887 b := v.Block 15888 _ = b 15889 // match: (AtomicStore32 ptr val mem) 15890 // cond: 15891 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem)) 15892 for { 15893 ptr := v.Args[0] 15894 val := v.Args[1] 15895 mem := v.Args[2] 15896 v.reset(OpSelect1) 15897 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem)) 15898 v0.AddArg(val) 15899 v0.AddArg(ptr) 15900 v0.AddArg(mem) 15901 v.AddArg(v0) 15902 return true 15903 } 15904 } 15905 func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool { 15906 b := v.Block 15907 _ = b 15908 // match: (AtomicStore64 ptr val mem) 15909 // cond: 15910 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem)) 15911 for { 15912 ptr := v.Args[0] 15913 val := v.Args[1] 15914 mem := v.Args[2] 15915 v.reset(OpSelect1) 15916 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem)) 15917 v0.AddArg(val) 15918 v0.AddArg(ptr) 15919 v0.AddArg(mem) 15920 v.AddArg(v0) 15921 return true 15922 } 15923 } 15924 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool { 15925 b := v.Block 15926 _ = b 15927 // match: (AtomicStorePtrNoWB ptr val mem) 15928 // cond: config.PtrSize == 8 15929 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 15930 for { 15931 ptr := v.Args[0] 15932 val := v.Args[1] 15933 mem := v.Args[2] 15934 if !(config.PtrSize == 8) { 15935 break 15936 } 15937 v.reset(OpSelect1) 15938 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 15939 v0.AddArg(val) 15940 v0.AddArg(ptr) 15941 v0.AddArg(mem) 15942 v.AddArg(v0) 15943 return true 15944 } 15945 // match: (AtomicStorePtrNoWB ptr val mem) 15946 // cond: config.PtrSize == 4 15947 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 15948 for { 15949 ptr := v.Args[0] 15950 val := v.Args[1] 15951 mem := v.Args[2] 15952 if !(config.PtrSize == 4) { 15953 break 15954 } 15955 v.reset(OpSelect1) 15956 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 15957 v0.AddArg(val) 15958 v0.AddArg(ptr) 15959 v0.AddArg(mem) 15960 v.AddArg(v0) 15961 return true 15962 } 15963 return false 15964 } 15965 func 
rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool { 15966 b := v.Block 15967 _ = b 15968 // match: (Avg64u x y) 15969 // cond: 15970 // result: (AVGQU x y) 15971 for { 15972 x := v.Args[0] 15973 y := v.Args[1] 15974 v.reset(OpAMD64AVGQU) 15975 v.AddArg(x) 15976 v.AddArg(y) 15977 return true 15978 } 15979 } 15980 func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool { 15981 b := v.Block 15982 _ = b 15983 // match: (Bswap32 x) 15984 // cond: 15985 // result: (BSWAPL x) 15986 for { 15987 x := v.Args[0] 15988 v.reset(OpAMD64BSWAPL) 15989 v.AddArg(x) 15990 return true 15991 } 15992 } 15993 func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool { 15994 b := v.Block 15995 _ = b 15996 // match: (Bswap64 x) 15997 // cond: 15998 // result: (BSWAPQ x) 15999 for { 16000 x := v.Args[0] 16001 v.reset(OpAMD64BSWAPQ) 16002 v.AddArg(x) 16003 return true 16004 } 16005 } 16006 func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool { 16007 b := v.Block 16008 _ = b 16009 // match: (ClosureCall [argwid] entry closure mem) 16010 // cond: 16011 // result: (CALLclosure [argwid] entry closure mem) 16012 for { 16013 argwid := v.AuxInt 16014 entry := v.Args[0] 16015 closure := v.Args[1] 16016 mem := v.Args[2] 16017 v.reset(OpAMD64CALLclosure) 16018 v.AuxInt = argwid 16019 v.AddArg(entry) 16020 v.AddArg(closure) 16021 v.AddArg(mem) 16022 return true 16023 } 16024 } 16025 func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool { 16026 b := v.Block 16027 _ = b 16028 // match: (Com16 x) 16029 // cond: 16030 // result: (NOTL x) 16031 for { 16032 x := v.Args[0] 16033 v.reset(OpAMD64NOTL) 16034 v.AddArg(x) 16035 return true 16036 } 16037 } 16038 func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool { 16039 b := v.Block 16040 _ = b 16041 // match: (Com32 x) 16042 // cond: 16043 // result: (NOTL x) 16044 for { 16045 x := v.Args[0] 16046 v.reset(OpAMD64NOTL) 16047 v.AddArg(x) 16048 return true 16049 } 16050 } 16051 func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool { 16052 b := v.Block 16053 _ = b 16054 // match: (Com64 x) 16055 // cond: 16056 // result: (NOTQ x) 16057 for { 16058 x := v.Args[0] 16059 v.reset(OpAMD64NOTQ) 16060 v.AddArg(x) 16061 return true 16062 } 16063 } 16064 func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool { 16065 b := v.Block 16066 _ = b 16067 // match: (Com8 x) 16068 // cond: 16069 // result: (NOTL x) 16070 for { 16071 x := v.Args[0] 16072 v.reset(OpAMD64NOTL) 16073 v.AddArg(x) 16074 return true 16075 } 16076 } 16077 func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool { 16078 b := v.Block 16079 _ = b 16080 // match: (Const16 [val]) 16081 // cond: 16082 // result: (MOVLconst [val]) 16083 for { 16084 val := v.AuxInt 16085 v.reset(OpAMD64MOVLconst) 16086 v.AuxInt = val 16087 return true 16088 } 16089 } 16090 func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool { 16091 b := v.Block 16092 _ = b 16093 // match: (Const32 [val]) 16094 // cond: 16095 // result: (MOVLconst [val]) 16096 for { 16097 val := v.AuxInt 16098 v.reset(OpAMD64MOVLconst) 16099 v.AuxInt = val 16100 return true 16101 } 16102 } 16103 func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool { 16104 b := v.Block 16105 _ = b 16106 // match: (Const32F [val]) 16107 // cond: 16108 // result: (MOVSSconst [val]) 16109 for { 16110 val := v.AuxInt 16111 v.reset(OpAMD64MOVSSconst) 16112 v.AuxInt = val 16113 return true 16114 } 16115 } 16116 func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool { 16117 b := v.Block 16118 _ = b 16119 // 
match: (Const64 [val]) 16120 // cond: 16121 // result: (MOVQconst [val]) 16122 for { 16123 val := v.AuxInt 16124 v.reset(OpAMD64MOVQconst) 16125 v.AuxInt = val 16126 return true 16127 } 16128 } 16129 func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool { 16130 b := v.Block 16131 _ = b 16132 // match: (Const64F [val]) 16133 // cond: 16134 // result: (MOVSDconst [val]) 16135 for { 16136 val := v.AuxInt 16137 v.reset(OpAMD64MOVSDconst) 16138 v.AuxInt = val 16139 return true 16140 } 16141 } 16142 func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool { 16143 b := v.Block 16144 _ = b 16145 // match: (Const8 [val]) 16146 // cond: 16147 // result: (MOVLconst [val]) 16148 for { 16149 val := v.AuxInt 16150 v.reset(OpAMD64MOVLconst) 16151 v.AuxInt = val 16152 return true 16153 } 16154 } 16155 func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool { 16156 b := v.Block 16157 _ = b 16158 // match: (ConstBool [b]) 16159 // cond: 16160 // result: (MOVLconst [b]) 16161 for { 16162 b := v.AuxInt 16163 v.reset(OpAMD64MOVLconst) 16164 v.AuxInt = b 16165 return true 16166 } 16167 } 16168 func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool { 16169 b := v.Block 16170 _ = b 16171 // match: (ConstNil) 16172 // cond: config.PtrSize == 8 16173 // result: (MOVQconst [0]) 16174 for { 16175 if !(config.PtrSize == 8) { 16176 break 16177 } 16178 v.reset(OpAMD64MOVQconst) 16179 v.AuxInt = 0 16180 return true 16181 } 16182 // match: (ConstNil) 16183 // cond: config.PtrSize == 4 16184 // result: (MOVLconst [0]) 16185 for { 16186 if !(config.PtrSize == 4) { 16187 break 16188 } 16189 v.reset(OpAMD64MOVLconst) 16190 v.AuxInt = 0 16191 return true 16192 } 16193 return false 16194 } 16195 func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool { 16196 b := v.Block 16197 _ = b 16198 // match: (Convert <t> x mem) 16199 // cond: config.PtrSize == 8 16200 // result: (MOVQconvert <t> x mem) 16201 for { 16202 t := v.Type 16203 x := v.Args[0] 16204 mem := v.Args[1] 16205 if !(config.PtrSize == 8) { 16206 break 16207 } 16208 v.reset(OpAMD64MOVQconvert) 16209 v.Type = t 16210 v.AddArg(x) 16211 v.AddArg(mem) 16212 return true 16213 } 16214 // match: (Convert <t> x mem) 16215 // cond: config.PtrSize == 4 16216 // result: (MOVLconvert <t> x mem) 16217 for { 16218 t := v.Type 16219 x := v.Args[0] 16220 mem := v.Args[1] 16221 if !(config.PtrSize == 4) { 16222 break 16223 } 16224 v.reset(OpAMD64MOVLconvert) 16225 v.Type = t 16226 v.AddArg(x) 16227 v.AddArg(mem) 16228 return true 16229 } 16230 return false 16231 } 16232 func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool { 16233 b := v.Block 16234 _ = b 16235 // match: (Ctz32 <t> x) 16236 // cond: 16237 // result: (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x))) 16238 for { 16239 t := v.Type 16240 x := v.Args[0] 16241 v.reset(OpAMD64CMOVLEQ) 16242 v0 := b.NewValue0(v.Pos, OpSelect0, t) 16243 v1 := b.NewValue0(v.Pos, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 16244 v1.AddArg(x) 16245 v0.AddArg(v1) 16246 v.AddArg(v0) 16247 v2 := b.NewValue0(v.Pos, OpAMD64MOVLconst, t) 16248 v2.AuxInt = 32 16249 v.AddArg(v2) 16250 v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags) 16251 v4 := b.NewValue0(v.Pos, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 16252 v4.AddArg(x) 16253 v3.AddArg(v4) 16254 v.AddArg(v3) 16255 return true 16256 } 16257 } 16258 func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool { 16259 b := v.Block 16260 _ = b 16261 // match: (Ctz64 <t> x) 16262 // cond: 
16263 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x))) 16264 for { 16265 t := v.Type 16266 x := v.Args[0] 16267 v.reset(OpAMD64CMOVQEQ) 16268 v0 := b.NewValue0(v.Pos, OpSelect0, t) 16269 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 16270 v1.AddArg(x) 16271 v0.AddArg(v1) 16272 v.AddArg(v0) 16273 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 16274 v2.AuxInt = 64 16275 v.AddArg(v2) 16276 v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags) 16277 v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 16278 v4.AddArg(x) 16279 v3.AddArg(v4) 16280 v.AddArg(v3) 16281 return true 16282 } 16283 } 16284 func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool { 16285 b := v.Block 16286 _ = b 16287 // match: (Cvt32Fto32 x) 16288 // cond: 16289 // result: (CVTTSS2SL x) 16290 for { 16291 x := v.Args[0] 16292 v.reset(OpAMD64CVTTSS2SL) 16293 v.AddArg(x) 16294 return true 16295 } 16296 } 16297 func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool { 16298 b := v.Block 16299 _ = b 16300 // match: (Cvt32Fto64 x) 16301 // cond: 16302 // result: (CVTTSS2SQ x) 16303 for { 16304 x := v.Args[0] 16305 v.reset(OpAMD64CVTTSS2SQ) 16306 v.AddArg(x) 16307 return true 16308 } 16309 } 16310 func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool { 16311 b := v.Block 16312 _ = b 16313 // match: (Cvt32Fto64F x) 16314 // cond: 16315 // result: (CVTSS2SD x) 16316 for { 16317 x := v.Args[0] 16318 v.reset(OpAMD64CVTSS2SD) 16319 v.AddArg(x) 16320 return true 16321 } 16322 } 16323 func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool { 16324 b := v.Block 16325 _ = b 16326 // match: (Cvt32to32F x) 16327 // cond: 16328 // result: (CVTSL2SS x) 16329 for { 16330 x := v.Args[0] 16331 v.reset(OpAMD64CVTSL2SS) 16332 v.AddArg(x) 16333 return true 16334 } 16335 } 16336 func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool { 16337 b := v.Block 16338 _ = b 16339 // match: (Cvt32to64F x) 16340 // cond: 16341 // result: (CVTSL2SD x) 16342 for { 16343 x := v.Args[0] 16344 v.reset(OpAMD64CVTSL2SD) 16345 v.AddArg(x) 16346 return true 16347 } 16348 } 16349 func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool { 16350 b := v.Block 16351 _ = b 16352 // match: (Cvt64Fto32 x) 16353 // cond: 16354 // result: (CVTTSD2SL x) 16355 for { 16356 x := v.Args[0] 16357 v.reset(OpAMD64CVTTSD2SL) 16358 v.AddArg(x) 16359 return true 16360 } 16361 } 16362 func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool { 16363 b := v.Block 16364 _ = b 16365 // match: (Cvt64Fto32F x) 16366 // cond: 16367 // result: (CVTSD2SS x) 16368 for { 16369 x := v.Args[0] 16370 v.reset(OpAMD64CVTSD2SS) 16371 v.AddArg(x) 16372 return true 16373 } 16374 } 16375 func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool { 16376 b := v.Block 16377 _ = b 16378 // match: (Cvt64Fto64 x) 16379 // cond: 16380 // result: (CVTTSD2SQ x) 16381 for { 16382 x := v.Args[0] 16383 v.reset(OpAMD64CVTTSD2SQ) 16384 v.AddArg(x) 16385 return true 16386 } 16387 } 16388 func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool { 16389 b := v.Block 16390 _ = b 16391 // match: (Cvt64to32F x) 16392 // cond: 16393 // result: (CVTSQ2SS x) 16394 for { 16395 x := v.Args[0] 16396 v.reset(OpAMD64CVTSQ2SS) 16397 v.AddArg(x) 16398 return true 16399 } 16400 } 16401 func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool { 16402 b := v.Block 16403 _ = b 16404 // match: (Cvt64to64F x) 16405 // cond: 16406 
// result: (CVTSQ2SD x) 16407 for { 16408 x := v.Args[0] 16409 v.reset(OpAMD64CVTSQ2SD) 16410 v.AddArg(x) 16411 return true 16412 } 16413 } 16414 func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool { 16415 b := v.Block 16416 _ = b 16417 // match: (DeferCall [argwid] mem) 16418 // cond: 16419 // result: (CALLdefer [argwid] mem) 16420 for { 16421 argwid := v.AuxInt 16422 mem := v.Args[0] 16423 v.reset(OpAMD64CALLdefer) 16424 v.AuxInt = argwid 16425 v.AddArg(mem) 16426 return true 16427 } 16428 } 16429 func rewriteValueAMD64_OpDiv128u(v *Value, config *Config) bool { 16430 b := v.Block 16431 _ = b 16432 // match: (Div128u xhi xlo y) 16433 // cond: 16434 // result: (DIVQU2 xhi xlo y) 16435 for { 16436 xhi := v.Args[0] 16437 xlo := v.Args[1] 16438 y := v.Args[2] 16439 v.reset(OpAMD64DIVQU2) 16440 v.AddArg(xhi) 16441 v.AddArg(xlo) 16442 v.AddArg(y) 16443 return true 16444 } 16445 } 16446 func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool { 16447 b := v.Block 16448 _ = b 16449 // match: (Div16 x y) 16450 // cond: 16451 // result: (Select0 (DIVW x y)) 16452 for { 16453 x := v.Args[0] 16454 y := v.Args[1] 16455 v.reset(OpSelect0) 16456 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 16457 v0.AddArg(x) 16458 v0.AddArg(y) 16459 v.AddArg(v0) 16460 return true 16461 } 16462 } 16463 func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool { 16464 b := v.Block 16465 _ = b 16466 // match: (Div16u x y) 16467 // cond: 16468 // result: (Select0 (DIVWU x y)) 16469 for { 16470 x := v.Args[0] 16471 y := v.Args[1] 16472 v.reset(OpSelect0) 16473 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 16474 v0.AddArg(x) 16475 v0.AddArg(y) 16476 v.AddArg(v0) 16477 return true 16478 } 16479 } 16480 func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool { 16481 b := v.Block 16482 _ = b 16483 // match: (Div32 x y) 16484 // cond: 16485 // result: (Select0 (DIVL x y)) 16486 for { 16487 x := v.Args[0] 16488 y := v.Args[1] 16489 v.reset(OpSelect0) 16490 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 16491 v0.AddArg(x) 16492 v0.AddArg(y) 16493 v.AddArg(v0) 16494 return true 16495 } 16496 } 16497 func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool { 16498 b := v.Block 16499 _ = b 16500 // match: (Div32F x y) 16501 // cond: 16502 // result: (DIVSS x y) 16503 for { 16504 x := v.Args[0] 16505 y := v.Args[1] 16506 v.reset(OpAMD64DIVSS) 16507 v.AddArg(x) 16508 v.AddArg(y) 16509 return true 16510 } 16511 } 16512 func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool { 16513 b := v.Block 16514 _ = b 16515 // match: (Div32u x y) 16516 // cond: 16517 // result: (Select0 (DIVLU x y)) 16518 for { 16519 x := v.Args[0] 16520 y := v.Args[1] 16521 v.reset(OpSelect0) 16522 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 16523 v0.AddArg(x) 16524 v0.AddArg(y) 16525 v.AddArg(v0) 16526 return true 16527 } 16528 } 16529 func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool { 16530 b := v.Block 16531 _ = b 16532 // match: (Div64 x y) 16533 // cond: 16534 // result: (Select0 (DIVQ x y)) 16535 for { 16536 x := v.Args[0] 16537 y := v.Args[1] 16538 v.reset(OpSelect0) 16539 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 16540 v0.AddArg(x) 16541 v0.AddArg(y) 16542 v.AddArg(v0) 16543 return true 16544 } 16545 } 16546 func rewriteValueAMD64_OpDiv64F(v *Value, 
config *Config) bool { 16547 b := v.Block 16548 _ = b 16549 // match: (Div64F x y) 16550 // cond: 16551 // result: (DIVSD x y) 16552 for { 16553 x := v.Args[0] 16554 y := v.Args[1] 16555 v.reset(OpAMD64DIVSD) 16556 v.AddArg(x) 16557 v.AddArg(y) 16558 return true 16559 } 16560 } 16561 func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool { 16562 b := v.Block 16563 _ = b 16564 // match: (Div64u x y) 16565 // cond: 16566 // result: (Select0 (DIVQU x y)) 16567 for { 16568 x := v.Args[0] 16569 y := v.Args[1] 16570 v.reset(OpSelect0) 16571 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 16572 v0.AddArg(x) 16573 v0.AddArg(y) 16574 v.AddArg(v0) 16575 return true 16576 } 16577 } 16578 func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool { 16579 b := v.Block 16580 _ = b 16581 // match: (Div8 x y) 16582 // cond: 16583 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 16584 for { 16585 x := v.Args[0] 16586 y := v.Args[1] 16587 v.reset(OpSelect0) 16588 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 16589 v1 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16()) 16590 v1.AddArg(x) 16591 v0.AddArg(v1) 16592 v2 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16()) 16593 v2.AddArg(y) 16594 v0.AddArg(v2) 16595 v.AddArg(v0) 16596 return true 16597 } 16598 } 16599 func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool { 16600 b := v.Block 16601 _ = b 16602 // match: (Div8u x y) 16603 // cond: 16604 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 16605 for { 16606 x := v.Args[0] 16607 y := v.Args[1] 16608 v.reset(OpSelect0) 16609 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 16610 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16()) 16611 v1.AddArg(x) 16612 v0.AddArg(v1) 16613 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16()) 16614 v2.AddArg(y) 16615 v0.AddArg(v2) 16616 v.AddArg(v0) 16617 return true 16618 } 16619 } 16620 func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool { 16621 b := v.Block 16622 _ = b 16623 // match: (Eq16 x y) 16624 // cond: 16625 // result: (SETEQ (CMPW x y)) 16626 for { 16627 x := v.Args[0] 16628 y := v.Args[1] 16629 v.reset(OpAMD64SETEQ) 16630 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 16631 v0.AddArg(x) 16632 v0.AddArg(y) 16633 v.AddArg(v0) 16634 return true 16635 } 16636 } 16637 func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool { 16638 b := v.Block 16639 _ = b 16640 // match: (Eq32 x y) 16641 // cond: 16642 // result: (SETEQ (CMPL x y)) 16643 for { 16644 x := v.Args[0] 16645 y := v.Args[1] 16646 v.reset(OpAMD64SETEQ) 16647 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 16648 v0.AddArg(x) 16649 v0.AddArg(y) 16650 v.AddArg(v0) 16651 return true 16652 } 16653 } 16654 func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool { 16655 b := v.Block 16656 _ = b 16657 // match: (Eq32F x y) 16658 // cond: 16659 // result: (SETEQF (UCOMISS x y)) 16660 for { 16661 x := v.Args[0] 16662 y := v.Args[1] 16663 v.reset(OpAMD64SETEQF) 16664 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 16665 v0.AddArg(x) 16666 v0.AddArg(y) 16667 v.AddArg(v0) 16668 return true 16669 } 16670 } 16671 func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool { 16672 b := v.Block 16673 _ = b 16674 // match: (Eq64 x y) 16675 // cond: 16676 // result: (SETEQ (CMPQ x y)) 16677 for { 16678 x := v.Args[0] 16679 y := v.Args[1] 16680 
v.reset(OpAMD64SETEQ) 16681 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 16682 v0.AddArg(x) 16683 v0.AddArg(y) 16684 v.AddArg(v0) 16685 return true 16686 } 16687 } 16688 func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool { 16689 b := v.Block 16690 _ = b 16691 // match: (Eq64F x y) 16692 // cond: 16693 // result: (SETEQF (UCOMISD x y)) 16694 for { 16695 x := v.Args[0] 16696 y := v.Args[1] 16697 v.reset(OpAMD64SETEQF) 16698 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 16699 v0.AddArg(x) 16700 v0.AddArg(y) 16701 v.AddArg(v0) 16702 return true 16703 } 16704 } 16705 func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool { 16706 b := v.Block 16707 _ = b 16708 // match: (Eq8 x y) 16709 // cond: 16710 // result: (SETEQ (CMPB x y)) 16711 for { 16712 x := v.Args[0] 16713 y := v.Args[1] 16714 v.reset(OpAMD64SETEQ) 16715 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 16716 v0.AddArg(x) 16717 v0.AddArg(y) 16718 v.AddArg(v0) 16719 return true 16720 } 16721 } 16722 func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool { 16723 b := v.Block 16724 _ = b 16725 // match: (EqB x y) 16726 // cond: 16727 // result: (SETEQ (CMPB x y)) 16728 for { 16729 x := v.Args[0] 16730 y := v.Args[1] 16731 v.reset(OpAMD64SETEQ) 16732 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 16733 v0.AddArg(x) 16734 v0.AddArg(y) 16735 v.AddArg(v0) 16736 return true 16737 } 16738 } 16739 func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool { 16740 b := v.Block 16741 _ = b 16742 // match: (EqPtr x y) 16743 // cond: config.PtrSize == 8 16744 // result: (SETEQ (CMPQ x y)) 16745 for { 16746 x := v.Args[0] 16747 y := v.Args[1] 16748 if !(config.PtrSize == 8) { 16749 break 16750 } 16751 v.reset(OpAMD64SETEQ) 16752 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 16753 v0.AddArg(x) 16754 v0.AddArg(y) 16755 v.AddArg(v0) 16756 return true 16757 } 16758 // match: (EqPtr x y) 16759 // cond: config.PtrSize == 4 16760 // result: (SETEQ (CMPL x y)) 16761 for { 16762 x := v.Args[0] 16763 y := v.Args[1] 16764 if !(config.PtrSize == 4) { 16765 break 16766 } 16767 v.reset(OpAMD64SETEQ) 16768 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 16769 v0.AddArg(x) 16770 v0.AddArg(y) 16771 v.AddArg(v0) 16772 return true 16773 } 16774 return false 16775 } 16776 func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool { 16777 b := v.Block 16778 _ = b 16779 // match: (Geq16 x y) 16780 // cond: 16781 // result: (SETGE (CMPW x y)) 16782 for { 16783 x := v.Args[0] 16784 y := v.Args[1] 16785 v.reset(OpAMD64SETGE) 16786 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 16787 v0.AddArg(x) 16788 v0.AddArg(y) 16789 v.AddArg(v0) 16790 return true 16791 } 16792 } 16793 func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool { 16794 b := v.Block 16795 _ = b 16796 // match: (Geq16U x y) 16797 // cond: 16798 // result: (SETAE (CMPW x y)) 16799 for { 16800 x := v.Args[0] 16801 y := v.Args[1] 16802 v.reset(OpAMD64SETAE) 16803 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 16804 v0.AddArg(x) 16805 v0.AddArg(y) 16806 v.AddArg(v0) 16807 return true 16808 } 16809 } 16810 func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool { 16811 b := v.Block 16812 _ = b 16813 // match: (Geq32 x y) 16814 // cond: 16815 // result: (SETGE (CMPL x y)) 16816 for { 16817 x := v.Args[0] 16818 y := v.Args[1] 16819 v.reset(OpAMD64SETGE) 16820 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 16821 v0.AddArg(x) 16822 v0.AddArg(y) 16823 v.AddArg(v0) 16824 return true 16825 } 16826 } 16827 func rewriteValueAMD64_OpGeq32F(v *Value, config 
*Config) bool { 16828 b := v.Block 16829 _ = b 16830 // match: (Geq32F x y) 16831 // cond: 16832 // result: (SETGEF (UCOMISS x y)) 16833 for { 16834 x := v.Args[0] 16835 y := v.Args[1] 16836 v.reset(OpAMD64SETGEF) 16837 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 16838 v0.AddArg(x) 16839 v0.AddArg(y) 16840 v.AddArg(v0) 16841 return true 16842 } 16843 } 16844 func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool { 16845 b := v.Block 16846 _ = b 16847 // match: (Geq32U x y) 16848 // cond: 16849 // result: (SETAE (CMPL x y)) 16850 for { 16851 x := v.Args[0] 16852 y := v.Args[1] 16853 v.reset(OpAMD64SETAE) 16854 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 16855 v0.AddArg(x) 16856 v0.AddArg(y) 16857 v.AddArg(v0) 16858 return true 16859 } 16860 } 16861 func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool { 16862 b := v.Block 16863 _ = b 16864 // match: (Geq64 x y) 16865 // cond: 16866 // result: (SETGE (CMPQ x y)) 16867 for { 16868 x := v.Args[0] 16869 y := v.Args[1] 16870 v.reset(OpAMD64SETGE) 16871 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 16872 v0.AddArg(x) 16873 v0.AddArg(y) 16874 v.AddArg(v0) 16875 return true 16876 } 16877 } 16878 func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool { 16879 b := v.Block 16880 _ = b 16881 // match: (Geq64F x y) 16882 // cond: 16883 // result: (SETGEF (UCOMISD x y)) 16884 for { 16885 x := v.Args[0] 16886 y := v.Args[1] 16887 v.reset(OpAMD64SETGEF) 16888 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 16889 v0.AddArg(x) 16890 v0.AddArg(y) 16891 v.AddArg(v0) 16892 return true 16893 } 16894 } 16895 func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool { 16896 b := v.Block 16897 _ = b 16898 // match: (Geq64U x y) 16899 // cond: 16900 // result: (SETAE (CMPQ x y)) 16901 for { 16902 x := v.Args[0] 16903 y := v.Args[1] 16904 v.reset(OpAMD64SETAE) 16905 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 16906 v0.AddArg(x) 16907 v0.AddArg(y) 16908 v.AddArg(v0) 16909 return true 16910 } 16911 } 16912 func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool { 16913 b := v.Block 16914 _ = b 16915 // match: (Geq8 x y) 16916 // cond: 16917 // result: (SETGE (CMPB x y)) 16918 for { 16919 x := v.Args[0] 16920 y := v.Args[1] 16921 v.reset(OpAMD64SETGE) 16922 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 16923 v0.AddArg(x) 16924 v0.AddArg(y) 16925 v.AddArg(v0) 16926 return true 16927 } 16928 } 16929 func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool { 16930 b := v.Block 16931 _ = b 16932 // match: (Geq8U x y) 16933 // cond: 16934 // result: (SETAE (CMPB x y)) 16935 for { 16936 x := v.Args[0] 16937 y := v.Args[1] 16938 v.reset(OpAMD64SETAE) 16939 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 16940 v0.AddArg(x) 16941 v0.AddArg(y) 16942 v.AddArg(v0) 16943 return true 16944 } 16945 } 16946 func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool { 16947 b := v.Block 16948 _ = b 16949 // match: (GetClosurePtr) 16950 // cond: 16951 // result: (LoweredGetClosurePtr) 16952 for { 16953 v.reset(OpAMD64LoweredGetClosurePtr) 16954 return true 16955 } 16956 } 16957 func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool { 16958 b := v.Block 16959 _ = b 16960 // match: (GetG mem) 16961 // cond: 16962 // result: (LoweredGetG mem) 16963 for { 16964 mem := v.Args[0] 16965 v.reset(OpAMD64LoweredGetG) 16966 v.AddArg(mem) 16967 return true 16968 } 16969 } 16970 func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool { 16971 b := v.Block 16972 _ = b 16973 // match: (GoCall [argwid] mem) 16974 
	// cond:
	// result: (CALLgo [argwid] mem)
	for {
		argwid := v.AuxInt
		mem := v.Args[0]
		v.reset(OpAMD64CALLgo)
		v.AuxInt = argwid
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
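// NOTE: the Greater* lowerings above, and the Greater8/Greater8U lowerings
// that follow, all instantiate one pattern: a signed integer comparison
// lowers to SETG over the width-matched compare (CMPB/CMPW/CMPL/CMPQ), an
// unsigned comparison lowers to SETA over the same compare, and the
// floating-point cases lower to SETGF over UCOMISS/UCOMISD. Assuming the
// usual "(pattern) -> (result)" form of gen/AMD64.rules (the arrow syntax is
// an assumption here; the op names come from the match/result comments in
// this file), the source rules would read roughly:
//
//	(Greater64  x y) -> (SETG (CMPQ x y))
//	(Greater64U x y) -> (SETA (CMPQ x y))
//	(Greater8   x y) -> (SETG (CMPB x y))
//	(Greater8U  x y) -> (SETA (CMPB x y))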
17121 func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool { 17122 b := v.Block 17123 _ = b 17124 // match: (Greater8 x y) 17125 // cond: 17126 // result: (SETG (CMPB x y)) 17127 for { 17128 x := v.Args[0] 17129 y := v.Args[1] 17130 v.reset(OpAMD64SETG) 17131 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17132 v0.AddArg(x) 17133 v0.AddArg(y) 17134 v.AddArg(v0) 17135 return true 17136 } 17137 } 17138 func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool { 17139 b := v.Block 17140 _ = b 17141 // match: (Greater8U x y) 17142 // cond: 17143 // result: (SETA (CMPB x y)) 17144 for { 17145 x := v.Args[0] 17146 y := v.Args[1] 17147 v.reset(OpAMD64SETA) 17148 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17149 v0.AddArg(x) 17150 v0.AddArg(y) 17151 v.AddArg(v0) 17152 return true 17153 } 17154 } 17155 func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool { 17156 b := v.Block 17157 _ = b 17158 // match: (Hmul16 x y) 17159 // cond: 17160 // result: (HMULW x y) 17161 for { 17162 x := v.Args[0] 17163 y := v.Args[1] 17164 v.reset(OpAMD64HMULW) 17165 v.AddArg(x) 17166 v.AddArg(y) 17167 return true 17168 } 17169 } 17170 func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool { 17171 b := v.Block 17172 _ = b 17173 // match: (Hmul16u x y) 17174 // cond: 17175 // result: (HMULWU x y) 17176 for { 17177 x := v.Args[0] 17178 y := v.Args[1] 17179 v.reset(OpAMD64HMULWU) 17180 v.AddArg(x) 17181 v.AddArg(y) 17182 return true 17183 } 17184 } 17185 func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool { 17186 b := v.Block 17187 _ = b 17188 // match: (Hmul32 x y) 17189 // cond: 17190 // result: (HMULL x y) 17191 for { 17192 x := v.Args[0] 17193 y := v.Args[1] 17194 v.reset(OpAMD64HMULL) 17195 v.AddArg(x) 17196 v.AddArg(y) 17197 return true 17198 } 17199 } 17200 func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool { 17201 b := v.Block 17202 _ = b 17203 // match: (Hmul32u x y) 17204 // cond: 17205 // result: (HMULLU x y) 17206 for { 17207 x := v.Args[0] 17208 y := v.Args[1] 17209 v.reset(OpAMD64HMULLU) 17210 v.AddArg(x) 17211 v.AddArg(y) 17212 return true 17213 } 17214 } 17215 func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool { 17216 b := v.Block 17217 _ = b 17218 // match: (Hmul64 x y) 17219 // cond: 17220 // result: (HMULQ x y) 17221 for { 17222 x := v.Args[0] 17223 y := v.Args[1] 17224 v.reset(OpAMD64HMULQ) 17225 v.AddArg(x) 17226 v.AddArg(y) 17227 return true 17228 } 17229 } 17230 func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool { 17231 b := v.Block 17232 _ = b 17233 // match: (Hmul64u x y) 17234 // cond: 17235 // result: (HMULQU x y) 17236 for { 17237 x := v.Args[0] 17238 y := v.Args[1] 17239 v.reset(OpAMD64HMULQU) 17240 v.AddArg(x) 17241 v.AddArg(y) 17242 return true 17243 } 17244 } 17245 func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool { 17246 b := v.Block 17247 _ = b 17248 // match: (Hmul8 x y) 17249 // cond: 17250 // result: (HMULB x y) 17251 for { 17252 x := v.Args[0] 17253 y := v.Args[1] 17254 v.reset(OpAMD64HMULB) 17255 v.AddArg(x) 17256 v.AddArg(y) 17257 return true 17258 } 17259 } 17260 func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool { 17261 b := v.Block 17262 _ = b 17263 // match: (Hmul8u x y) 17264 // cond: 17265 // result: (HMULBU x y) 17266 for { 17267 x := v.Args[0] 17268 y := v.Args[1] 17269 v.reset(OpAMD64HMULBU) 17270 v.AddArg(x) 17271 v.AddArg(y) 17272 return true 17273 } 17274 } 17275 func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool { 17276 b := v.Block 17277 _ = 
b 17278 // match: (Int64Hi x) 17279 // cond: 17280 // result: (SHRQconst [32] x) 17281 for { 17282 x := v.Args[0] 17283 v.reset(OpAMD64SHRQconst) 17284 v.AuxInt = 32 17285 v.AddArg(x) 17286 return true 17287 } 17288 } 17289 func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool { 17290 b := v.Block 17291 _ = b 17292 // match: (InterCall [argwid] entry mem) 17293 // cond: 17294 // result: (CALLinter [argwid] entry mem) 17295 for { 17296 argwid := v.AuxInt 17297 entry := v.Args[0] 17298 mem := v.Args[1] 17299 v.reset(OpAMD64CALLinter) 17300 v.AuxInt = argwid 17301 v.AddArg(entry) 17302 v.AddArg(mem) 17303 return true 17304 } 17305 } 17306 func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool { 17307 b := v.Block 17308 _ = b 17309 // match: (IsInBounds idx len) 17310 // cond: 17311 // result: (SETB (CMPQ idx len)) 17312 for { 17313 idx := v.Args[0] 17314 len := v.Args[1] 17315 v.reset(OpAMD64SETB) 17316 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17317 v0.AddArg(idx) 17318 v0.AddArg(len) 17319 v.AddArg(v0) 17320 return true 17321 } 17322 } 17323 func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool { 17324 b := v.Block 17325 _ = b 17326 // match: (IsNonNil p) 17327 // cond: config.PtrSize == 8 17328 // result: (SETNE (TESTQ p p)) 17329 for { 17330 p := v.Args[0] 17331 if !(config.PtrSize == 8) { 17332 break 17333 } 17334 v.reset(OpAMD64SETNE) 17335 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, TypeFlags) 17336 v0.AddArg(p) 17337 v0.AddArg(p) 17338 v.AddArg(v0) 17339 return true 17340 } 17341 // match: (IsNonNil p) 17342 // cond: config.PtrSize == 4 17343 // result: (SETNE (TESTL p p)) 17344 for { 17345 p := v.Args[0] 17346 if !(config.PtrSize == 4) { 17347 break 17348 } 17349 v.reset(OpAMD64SETNE) 17350 v0 := b.NewValue0(v.Pos, OpAMD64TESTL, TypeFlags) 17351 v0.AddArg(p) 17352 v0.AddArg(p) 17353 v.AddArg(v0) 17354 return true 17355 } 17356 return false 17357 } 17358 func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool { 17359 b := v.Block 17360 _ = b 17361 // match: (IsSliceInBounds idx len) 17362 // cond: 17363 // result: (SETBE (CMPQ idx len)) 17364 for { 17365 idx := v.Args[0] 17366 len := v.Args[1] 17367 v.reset(OpAMD64SETBE) 17368 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17369 v0.AddArg(idx) 17370 v0.AddArg(len) 17371 v.AddArg(v0) 17372 return true 17373 } 17374 } 17375 func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool { 17376 b := v.Block 17377 _ = b 17378 // match: (Leq16 x y) 17379 // cond: 17380 // result: (SETLE (CMPW x y)) 17381 for { 17382 x := v.Args[0] 17383 y := v.Args[1] 17384 v.reset(OpAMD64SETLE) 17385 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17386 v0.AddArg(x) 17387 v0.AddArg(y) 17388 v.AddArg(v0) 17389 return true 17390 } 17391 } 17392 func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool { 17393 b := v.Block 17394 _ = b 17395 // match: (Leq16U x y) 17396 // cond: 17397 // result: (SETBE (CMPW x y)) 17398 for { 17399 x := v.Args[0] 17400 y := v.Args[1] 17401 v.reset(OpAMD64SETBE) 17402 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17403 v0.AddArg(x) 17404 v0.AddArg(y) 17405 v.AddArg(v0) 17406 return true 17407 } 17408 } 17409 func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool { 17410 b := v.Block 17411 _ = b 17412 // match: (Leq32 x y) 17413 // cond: 17414 // result: (SETLE (CMPL x y)) 17415 for { 17416 x := v.Args[0] 17417 y := v.Args[1] 17418 v.reset(OpAMD64SETLE) 17419 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17420 v0.AddArg(x) 17421 v0.AddArg(y) 17422 
v.AddArg(v0) 17423 return true 17424 } 17425 } 17426 func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool { 17427 b := v.Block 17428 _ = b 17429 // match: (Leq32F x y) 17430 // cond: 17431 // result: (SETGEF (UCOMISS y x)) 17432 for { 17433 x := v.Args[0] 17434 y := v.Args[1] 17435 v.reset(OpAMD64SETGEF) 17436 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 17437 v0.AddArg(y) 17438 v0.AddArg(x) 17439 v.AddArg(v0) 17440 return true 17441 } 17442 } 17443 func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool { 17444 b := v.Block 17445 _ = b 17446 // match: (Leq32U x y) 17447 // cond: 17448 // result: (SETBE (CMPL x y)) 17449 for { 17450 x := v.Args[0] 17451 y := v.Args[1] 17452 v.reset(OpAMD64SETBE) 17453 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17454 v0.AddArg(x) 17455 v0.AddArg(y) 17456 v.AddArg(v0) 17457 return true 17458 } 17459 } 17460 func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool { 17461 b := v.Block 17462 _ = b 17463 // match: (Leq64 x y) 17464 // cond: 17465 // result: (SETLE (CMPQ x y)) 17466 for { 17467 x := v.Args[0] 17468 y := v.Args[1] 17469 v.reset(OpAMD64SETLE) 17470 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17471 v0.AddArg(x) 17472 v0.AddArg(y) 17473 v.AddArg(v0) 17474 return true 17475 } 17476 } 17477 func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool { 17478 b := v.Block 17479 _ = b 17480 // match: (Leq64F x y) 17481 // cond: 17482 // result: (SETGEF (UCOMISD y x)) 17483 for { 17484 x := v.Args[0] 17485 y := v.Args[1] 17486 v.reset(OpAMD64SETGEF) 17487 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 17488 v0.AddArg(y) 17489 v0.AddArg(x) 17490 v.AddArg(v0) 17491 return true 17492 } 17493 } 17494 func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool { 17495 b := v.Block 17496 _ = b 17497 // match: (Leq64U x y) 17498 // cond: 17499 // result: (SETBE (CMPQ x y)) 17500 for { 17501 x := v.Args[0] 17502 y := v.Args[1] 17503 v.reset(OpAMD64SETBE) 17504 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17505 v0.AddArg(x) 17506 v0.AddArg(y) 17507 v.AddArg(v0) 17508 return true 17509 } 17510 } 17511 func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool { 17512 b := v.Block 17513 _ = b 17514 // match: (Leq8 x y) 17515 // cond: 17516 // result: (SETLE (CMPB x y)) 17517 for { 17518 x := v.Args[0] 17519 y := v.Args[1] 17520 v.reset(OpAMD64SETLE) 17521 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17522 v0.AddArg(x) 17523 v0.AddArg(y) 17524 v.AddArg(v0) 17525 return true 17526 } 17527 } 17528 func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool { 17529 b := v.Block 17530 _ = b 17531 // match: (Leq8U x y) 17532 // cond: 17533 // result: (SETBE (CMPB x y)) 17534 for { 17535 x := v.Args[0] 17536 y := v.Args[1] 17537 v.reset(OpAMD64SETBE) 17538 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17539 v0.AddArg(x) 17540 v0.AddArg(y) 17541 v.AddArg(v0) 17542 return true 17543 } 17544 } 17545 func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool { 17546 b := v.Block 17547 _ = b 17548 // match: (Less16 x y) 17549 // cond: 17550 // result: (SETL (CMPW x y)) 17551 for { 17552 x := v.Args[0] 17553 y := v.Args[1] 17554 v.reset(OpAMD64SETL) 17555 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17556 v0.AddArg(x) 17557 v0.AddArg(y) 17558 v.AddArg(v0) 17559 return true 17560 } 17561 } 17562 func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool { 17563 b := v.Block 17564 _ = b 17565 // match: (Less16U x y) 17566 // cond: 17567 // result: (SETB (CMPW x y)) 17568 for { 17569 x := v.Args[0] 17570 
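// The integer comparison lowerings in this region follow a fixed mapping from Go
// conditions to x86 condition codes: signed Less/Leq use SETL/SETLE, the unsigned
// variants use SETB/SETBE ("below"/"below or equal"), and the floating-point
// variants further on swap the UCOMISS/UCOMISD operands (x < y is rewritten as
// y > x) so the "above"-style conditions can be used, which report false for an
// unordered (NaN) comparison, matching Go's semantics.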
y := v.Args[1] 17571 v.reset(OpAMD64SETB) 17572 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17573 v0.AddArg(x) 17574 v0.AddArg(y) 17575 v.AddArg(v0) 17576 return true 17577 } 17578 } 17579 func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool { 17580 b := v.Block 17581 _ = b 17582 // match: (Less32 x y) 17583 // cond: 17584 // result: (SETL (CMPL x y)) 17585 for { 17586 x := v.Args[0] 17587 y := v.Args[1] 17588 v.reset(OpAMD64SETL) 17589 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17590 v0.AddArg(x) 17591 v0.AddArg(y) 17592 v.AddArg(v0) 17593 return true 17594 } 17595 } 17596 func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool { 17597 b := v.Block 17598 _ = b 17599 // match: (Less32F x y) 17600 // cond: 17601 // result: (SETGF (UCOMISS y x)) 17602 for { 17603 x := v.Args[0] 17604 y := v.Args[1] 17605 v.reset(OpAMD64SETGF) 17606 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 17607 v0.AddArg(y) 17608 v0.AddArg(x) 17609 v.AddArg(v0) 17610 return true 17611 } 17612 } 17613 func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool { 17614 b := v.Block 17615 _ = b 17616 // match: (Less32U x y) 17617 // cond: 17618 // result: (SETB (CMPL x y)) 17619 for { 17620 x := v.Args[0] 17621 y := v.Args[1] 17622 v.reset(OpAMD64SETB) 17623 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17624 v0.AddArg(x) 17625 v0.AddArg(y) 17626 v.AddArg(v0) 17627 return true 17628 } 17629 } 17630 func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool { 17631 b := v.Block 17632 _ = b 17633 // match: (Less64 x y) 17634 // cond: 17635 // result: (SETL (CMPQ x y)) 17636 for { 17637 x := v.Args[0] 17638 y := v.Args[1] 17639 v.reset(OpAMD64SETL) 17640 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17641 v0.AddArg(x) 17642 v0.AddArg(y) 17643 v.AddArg(v0) 17644 return true 17645 } 17646 } 17647 func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool { 17648 b := v.Block 17649 _ = b 17650 // match: (Less64F x y) 17651 // cond: 17652 // result: (SETGF (UCOMISD y x)) 17653 for { 17654 x := v.Args[0] 17655 y := v.Args[1] 17656 v.reset(OpAMD64SETGF) 17657 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 17658 v0.AddArg(y) 17659 v0.AddArg(x) 17660 v.AddArg(v0) 17661 return true 17662 } 17663 } 17664 func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool { 17665 b := v.Block 17666 _ = b 17667 // match: (Less64U x y) 17668 // cond: 17669 // result: (SETB (CMPQ x y)) 17670 for { 17671 x := v.Args[0] 17672 y := v.Args[1] 17673 v.reset(OpAMD64SETB) 17674 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17675 v0.AddArg(x) 17676 v0.AddArg(y) 17677 v.AddArg(v0) 17678 return true 17679 } 17680 } 17681 func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool { 17682 b := v.Block 17683 _ = b 17684 // match: (Less8 x y) 17685 // cond: 17686 // result: (SETL (CMPB x y)) 17687 for { 17688 x := v.Args[0] 17689 y := v.Args[1] 17690 v.reset(OpAMD64SETL) 17691 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17692 v0.AddArg(x) 17693 v0.AddArg(y) 17694 v.AddArg(v0) 17695 return true 17696 } 17697 } 17698 func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool { 17699 b := v.Block 17700 _ = b 17701 // match: (Less8U x y) 17702 // cond: 17703 // result: (SETB (CMPB x y)) 17704 for { 17705 x := v.Args[0] 17706 y := v.Args[1] 17707 v.reset(OpAMD64SETB) 17708 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17709 v0.AddArg(x) 17710 v0.AddArg(y) 17711 v.AddArg(v0) 17712 return true 17713 } 17714 } 17715 func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool { 17716 b := 
v.Block 17717 _ = b 17718 // match: (Load <t> ptr mem) 17719 // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) 17720 // result: (MOVQload ptr mem) 17721 for { 17722 t := v.Type 17723 ptr := v.Args[0] 17724 mem := v.Args[1] 17725 if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) { 17726 break 17727 } 17728 v.reset(OpAMD64MOVQload) 17729 v.AddArg(ptr) 17730 v.AddArg(mem) 17731 return true 17732 } 17733 // match: (Load <t> ptr mem) 17734 // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) 17735 // result: (MOVLload ptr mem) 17736 for { 17737 t := v.Type 17738 ptr := v.Args[0] 17739 mem := v.Args[1] 17740 if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) { 17741 break 17742 } 17743 v.reset(OpAMD64MOVLload) 17744 v.AddArg(ptr) 17745 v.AddArg(mem) 17746 return true 17747 } 17748 // match: (Load <t> ptr mem) 17749 // cond: is16BitInt(t) 17750 // result: (MOVWload ptr mem) 17751 for { 17752 t := v.Type 17753 ptr := v.Args[0] 17754 mem := v.Args[1] 17755 if !(is16BitInt(t)) { 17756 break 17757 } 17758 v.reset(OpAMD64MOVWload) 17759 v.AddArg(ptr) 17760 v.AddArg(mem) 17761 return true 17762 } 17763 // match: (Load <t> ptr mem) 17764 // cond: (t.IsBoolean() || is8BitInt(t)) 17765 // result: (MOVBload ptr mem) 17766 for { 17767 t := v.Type 17768 ptr := v.Args[0] 17769 mem := v.Args[1] 17770 if !(t.IsBoolean() || is8BitInt(t)) { 17771 break 17772 } 17773 v.reset(OpAMD64MOVBload) 17774 v.AddArg(ptr) 17775 v.AddArg(mem) 17776 return true 17777 } 17778 // match: (Load <t> ptr mem) 17779 // cond: is32BitFloat(t) 17780 // result: (MOVSSload ptr mem) 17781 for { 17782 t := v.Type 17783 ptr := v.Args[0] 17784 mem := v.Args[1] 17785 if !(is32BitFloat(t)) { 17786 break 17787 } 17788 v.reset(OpAMD64MOVSSload) 17789 v.AddArg(ptr) 17790 v.AddArg(mem) 17791 return true 17792 } 17793 // match: (Load <t> ptr mem) 17794 // cond: is64BitFloat(t) 17795 // result: (MOVSDload ptr mem) 17796 for { 17797 t := v.Type 17798 ptr := v.Args[0] 17799 mem := v.Args[1] 17800 if !(is64BitFloat(t)) { 17801 break 17802 } 17803 v.reset(OpAMD64MOVSDload) 17804 v.AddArg(ptr) 17805 v.AddArg(mem) 17806 return true 17807 } 17808 return false 17809 } 17810 func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool { 17811 b := v.Block 17812 _ = b 17813 // match: (Lsh16x16 <t> x y) 17814 // cond: 17815 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 17816 for { 17817 t := v.Type 17818 x := v.Args[0] 17819 y := v.Args[1] 17820 v.reset(OpAMD64ANDL) 17821 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 17822 v0.AddArg(x) 17823 v0.AddArg(y) 17824 v.AddArg(v0) 17825 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 17826 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 17827 v2.AuxInt = 32 17828 v2.AddArg(y) 17829 v1.AddArg(v2) 17830 v.AddArg(v1) 17831 return true 17832 } 17833 } 17834 func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool { 17835 b := v.Block 17836 _ = b 17837 // match: (Lsh16x32 <t> x y) 17838 // cond: 17839 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 17840 for { 17841 t := v.Type 17842 x := v.Args[0] 17843 y := v.Args[1] 17844 v.reset(OpAMD64ANDL) 17845 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 17846 v0.AddArg(x) 17847 v0.AddArg(y) 17848 v.AddArg(v0) 17849 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 17850 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 17851 v2.AuxInt = 32 17852 v2.AddArg(y) 17853 v1.AddArg(v2) 17854 v.AddArg(v1) 17855 return true 17856 } 17857 } 17858 func rewriteValueAMD64_OpLsh16x64(v *Value, config 
*Config) bool { 17859 b := v.Block 17860 _ = b 17861 // match: (Lsh16x64 <t> x y) 17862 // cond: 17863 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 17864 for { 17865 t := v.Type 17866 x := v.Args[0] 17867 y := v.Args[1] 17868 v.reset(OpAMD64ANDL) 17869 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 17870 v0.AddArg(x) 17871 v0.AddArg(y) 17872 v.AddArg(v0) 17873 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 17874 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 17875 v2.AuxInt = 32 17876 v2.AddArg(y) 17877 v1.AddArg(v2) 17878 v.AddArg(v1) 17879 return true 17880 } 17881 } 17882 func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool { 17883 b := v.Block 17884 _ = b 17885 // match: (Lsh16x8 <t> x y) 17886 // cond: 17887 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 17888 for { 17889 t := v.Type 17890 x := v.Args[0] 17891 y := v.Args[1] 17892 v.reset(OpAMD64ANDL) 17893 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 17894 v0.AddArg(x) 17895 v0.AddArg(y) 17896 v.AddArg(v0) 17897 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 17898 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 17899 v2.AuxInt = 32 17900 v2.AddArg(y) 17901 v1.AddArg(v2) 17902 v.AddArg(v1) 17903 return true 17904 } 17905 } 17906 func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool { 17907 b := v.Block 17908 _ = b 17909 // match: (Lsh32x16 <t> x y) 17910 // cond: 17911 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 17912 for { 17913 t := v.Type 17914 x := v.Args[0] 17915 y := v.Args[1] 17916 v.reset(OpAMD64ANDL) 17917 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 17918 v0.AddArg(x) 17919 v0.AddArg(y) 17920 v.AddArg(v0) 17921 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 17922 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 17923 v2.AuxInt = 32 17924 v2.AddArg(y) 17925 v1.AddArg(v2) 17926 v.AddArg(v1) 17927 return true 17928 } 17929 } 17930 func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool { 17931 b := v.Block 17932 _ = b 17933 // match: (Lsh32x32 <t> x y) 17934 // cond: 17935 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 17936 for { 17937 t := v.Type 17938 x := v.Args[0] 17939 y := v.Args[1] 17940 v.reset(OpAMD64ANDL) 17941 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 17942 v0.AddArg(x) 17943 v0.AddArg(y) 17944 v.AddArg(v0) 17945 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 17946 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 17947 v2.AuxInt = 32 17948 v2.AddArg(y) 17949 v1.AddArg(v2) 17950 v.AddArg(v1) 17951 return true 17952 } 17953 } 17954 func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool { 17955 b := v.Block 17956 _ = b 17957 // match: (Lsh32x64 <t> x y) 17958 // cond: 17959 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 17960 for { 17961 t := v.Type 17962 x := v.Args[0] 17963 y := v.Args[1] 17964 v.reset(OpAMD64ANDL) 17965 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 17966 v0.AddArg(x) 17967 v0.AddArg(y) 17968 v.AddArg(v0) 17969 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 17970 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 17971 v2.AuxInt = 32 17972 v2.AddArg(y) 17973 v1.AddArg(v2) 17974 v.AddArg(v1) 17975 return true 17976 } 17977 } 17978 func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool { 17979 b := v.Block 17980 _ = b 17981 // match: (Lsh32x8 <t> x y) 17982 // cond: 17983 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 17984 for { 17985 t := v.Type 17986 x := v.Args[0] 17987 y := 
v.Args[1] 17988 v.reset(OpAMD64ANDL) 17989 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 17990 v0.AddArg(x) 17991 v0.AddArg(y) 17992 v.AddArg(v0) 17993 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 17994 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 17995 v2.AuxInt = 32 17996 v2.AddArg(y) 17997 v1.AddArg(v2) 17998 v.AddArg(v1) 17999 return true 18000 } 18001 } 18002 func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool { 18003 b := v.Block 18004 _ = b 18005 // match: (Lsh64x16 <t> x y) 18006 // cond: 18007 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 18008 for { 18009 t := v.Type 18010 x := v.Args[0] 18011 y := v.Args[1] 18012 v.reset(OpAMD64ANDQ) 18013 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 18014 v0.AddArg(x) 18015 v0.AddArg(y) 18016 v.AddArg(v0) 18017 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 18018 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 18019 v2.AuxInt = 64 18020 v2.AddArg(y) 18021 v1.AddArg(v2) 18022 v.AddArg(v1) 18023 return true 18024 } 18025 } 18026 func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool { 18027 b := v.Block 18028 _ = b 18029 // match: (Lsh64x32 <t> x y) 18030 // cond: 18031 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 18032 for { 18033 t := v.Type 18034 x := v.Args[0] 18035 y := v.Args[1] 18036 v.reset(OpAMD64ANDQ) 18037 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 18038 v0.AddArg(x) 18039 v0.AddArg(y) 18040 v.AddArg(v0) 18041 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 18042 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 18043 v2.AuxInt = 64 18044 v2.AddArg(y) 18045 v1.AddArg(v2) 18046 v.AddArg(v1) 18047 return true 18048 } 18049 } 18050 func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool { 18051 b := v.Block 18052 _ = b 18053 // match: (Lsh64x64 <t> x y) 18054 // cond: 18055 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 18056 for { 18057 t := v.Type 18058 x := v.Args[0] 18059 y := v.Args[1] 18060 v.reset(OpAMD64ANDQ) 18061 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 18062 v0.AddArg(x) 18063 v0.AddArg(y) 18064 v.AddArg(v0) 18065 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 18066 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 18067 v2.AuxInt = 64 18068 v2.AddArg(y) 18069 v1.AddArg(v2) 18070 v.AddArg(v1) 18071 return true 18072 } 18073 } 18074 func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool { 18075 b := v.Block 18076 _ = b 18077 // match: (Lsh64x8 <t> x y) 18078 // cond: 18079 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 18080 for { 18081 t := v.Type 18082 x := v.Args[0] 18083 y := v.Args[1] 18084 v.reset(OpAMD64ANDQ) 18085 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 18086 v0.AddArg(x) 18087 v0.AddArg(y) 18088 v.AddArg(v0) 18089 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 18090 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 18091 v2.AuxInt = 64 18092 v2.AddArg(y) 18093 v1.AddArg(v2) 18094 v.AddArg(v1) 18095 return true 18096 } 18097 } 18098 func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool { 18099 b := v.Block 18100 _ = b 18101 // match: (Lsh8x16 <t> x y) 18102 // cond: 18103 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 18104 for { 18105 t := v.Type 18106 x := v.Args[0] 18107 y := v.Args[1] 18108 v.reset(OpAMD64ANDL) 18109 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18110 v0.AddArg(x) 18111 v0.AddArg(y) 18112 v.AddArg(v0) 18113 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18114 v2 := b.NewValue0(v.Pos, 
OpAMD64CMPWconst, TypeFlags) 18115 v2.AuxInt = 32 18116 v2.AddArg(y) 18117 v1.AddArg(v2) 18118 v.AddArg(v1) 18119 return true 18120 } 18121 } 18122 func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool { 18123 b := v.Block 18124 _ = b 18125 // match: (Lsh8x32 <t> x y) 18126 // cond: 18127 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 18128 for { 18129 t := v.Type 18130 x := v.Args[0] 18131 y := v.Args[1] 18132 v.reset(OpAMD64ANDL) 18133 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18134 v0.AddArg(x) 18135 v0.AddArg(y) 18136 v.AddArg(v0) 18137 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18138 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 18139 v2.AuxInt = 32 18140 v2.AddArg(y) 18141 v1.AddArg(v2) 18142 v.AddArg(v1) 18143 return true 18144 } 18145 } 18146 func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool { 18147 b := v.Block 18148 _ = b 18149 // match: (Lsh8x64 <t> x y) 18150 // cond: 18151 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 18152 for { 18153 t := v.Type 18154 x := v.Args[0] 18155 y := v.Args[1] 18156 v.reset(OpAMD64ANDL) 18157 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18158 v0.AddArg(x) 18159 v0.AddArg(y) 18160 v.AddArg(v0) 18161 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18162 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 18163 v2.AuxInt = 32 18164 v2.AddArg(y) 18165 v1.AddArg(v2) 18166 v.AddArg(v1) 18167 return true 18168 } 18169 } 18170 func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool { 18171 b := v.Block 18172 _ = b 18173 // match: (Lsh8x8 <t> x y) 18174 // cond: 18175 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 18176 for { 18177 t := v.Type 18178 x := v.Args[0] 18179 y := v.Args[1] 18180 v.reset(OpAMD64ANDL) 18181 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18182 v0.AddArg(x) 18183 v0.AddArg(y) 18184 v.AddArg(v0) 18185 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18186 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 18187 v2.AuxInt = 32 18188 v2.AddArg(y) 18189 v1.AddArg(v2) 18190 v.AddArg(v1) 18191 return true 18192 } 18193 } 18194 func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { 18195 b := v.Block 18196 _ = b 18197 // match: (Mod16 x y) 18198 // cond: 18199 // result: (Select1 (DIVW x y)) 18200 for { 18201 x := v.Args[0] 18202 y := v.Args[1] 18203 v.reset(OpSelect1) 18204 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 18205 v0.AddArg(x) 18206 v0.AddArg(y) 18207 v.AddArg(v0) 18208 return true 18209 } 18210 } 18211 func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool { 18212 b := v.Block 18213 _ = b 18214 // match: (Mod16u x y) 18215 // cond: 18216 // result: (Select1 (DIVWU x y)) 18217 for { 18218 x := v.Args[0] 18219 y := v.Args[1] 18220 v.reset(OpSelect1) 18221 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 18222 v0.AddArg(x) 18223 v0.AddArg(y) 18224 v.AddArg(v0) 18225 return true 18226 } 18227 } 18228 func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool { 18229 b := v.Block 18230 _ = b 18231 // match: (Mod32 x y) 18232 // cond: 18233 // result: (Select1 (DIVL x y)) 18234 for { 18235 x := v.Args[0] 18236 y := v.Args[1] 18237 v.reset(OpSelect1) 18238 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 18239 v0.AddArg(x) 18240 v0.AddArg(y) 18241 v.AddArg(v0) 18242 return true 18243 } 18244 } 18245 func rewriteValueAMD64_OpMod32u(v *Value, config *Config) 
bool { 18246 b := v.Block 18247 _ = b 18248 // match: (Mod32u x y) 18249 // cond: 18250 // result: (Select1 (DIVLU x y)) 18251 for { 18252 x := v.Args[0] 18253 y := v.Args[1] 18254 v.reset(OpSelect1) 18255 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 18256 v0.AddArg(x) 18257 v0.AddArg(y) 18258 v.AddArg(v0) 18259 return true 18260 } 18261 } 18262 func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool { 18263 b := v.Block 18264 _ = b 18265 // match: (Mod64 x y) 18266 // cond: 18267 // result: (Select1 (DIVQ x y)) 18268 for { 18269 x := v.Args[0] 18270 y := v.Args[1] 18271 v.reset(OpSelect1) 18272 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 18273 v0.AddArg(x) 18274 v0.AddArg(y) 18275 v.AddArg(v0) 18276 return true 18277 } 18278 } 18279 func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool { 18280 b := v.Block 18281 _ = b 18282 // match: (Mod64u x y) 18283 // cond: 18284 // result: (Select1 (DIVQU x y)) 18285 for { 18286 x := v.Args[0] 18287 y := v.Args[1] 18288 v.reset(OpSelect1) 18289 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 18290 v0.AddArg(x) 18291 v0.AddArg(y) 18292 v.AddArg(v0) 18293 return true 18294 } 18295 } 18296 func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool { 18297 b := v.Block 18298 _ = b 18299 // match: (Mod8 x y) 18300 // cond: 18301 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 18302 for { 18303 x := v.Args[0] 18304 y := v.Args[1] 18305 v.reset(OpSelect1) 18306 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 18307 v1 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16()) 18308 v1.AddArg(x) 18309 v0.AddArg(v1) 18310 v2 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16()) 18311 v2.AddArg(y) 18312 v0.AddArg(v2) 18313 v.AddArg(v0) 18314 return true 18315 } 18316 } 18317 func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool { 18318 b := v.Block 18319 _ = b 18320 // match: (Mod8u x y) 18321 // cond: 18322 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 18323 for { 18324 x := v.Args[0] 18325 y := v.Args[1] 18326 v.reset(OpSelect1) 18327 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 18328 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16()) 18329 v1.AddArg(x) 18330 v0.AddArg(v1) 18331 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16()) 18332 v2.AddArg(y) 18333 v0.AddArg(v2) 18334 v.AddArg(v0) 18335 return true 18336 } 18337 } 18338 func rewriteValueAMD64_OpMove(v *Value, config *Config) bool { 18339 b := v.Block 18340 _ = b 18341 // match: (Move [s] _ _ mem) 18342 // cond: SizeAndAlign(s).Size() == 0 18343 // result: mem 18344 for { 18345 s := v.AuxInt 18346 mem := v.Args[2] 18347 if !(SizeAndAlign(s).Size() == 0) { 18348 break 18349 } 18350 v.reset(OpCopy) 18351 v.Type = mem.Type 18352 v.AddArg(mem) 18353 return true 18354 } 18355 // match: (Move [s] dst src mem) 18356 // cond: SizeAndAlign(s).Size() == 1 18357 // result: (MOVBstore dst (MOVBload src mem) mem) 18358 for { 18359 s := v.AuxInt 18360 dst := v.Args[0] 18361 src := v.Args[1] 18362 mem := v.Args[2] 18363 if !(SizeAndAlign(s).Size() == 1) { 18364 break 18365 } 18366 v.reset(OpAMD64MOVBstore) 18367 v.AddArg(dst) 18368 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8()) 18369 v0.AddArg(src) 18370 v0.AddArg(mem) 18371 v.AddArg(v0) 18372 v.AddArg(mem) 
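// The remaining Move cases below lower progressively larger copies: sizes 2/4/8/16
// become a single load/store pair of the matching width, sizes 3/5/6/7 use two
// load/store pairs, sizes between 8 and 16 use two (overlapping) 8-byte moves,
// sizes over 16 that are not a multiple of 16 peel off the leading size%16 bytes
// with one 8- or 16-byte move and recurse, and multiples of 16 are dispatched to
// DUFFCOPY (up to 16*64 bytes, when Duff's device is enabled) or REPMOVSQ otherwise.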
18373 return true 18374 } 18375 // match: (Move [s] dst src mem) 18376 // cond: SizeAndAlign(s).Size() == 2 18377 // result: (MOVWstore dst (MOVWload src mem) mem) 18378 for { 18379 s := v.AuxInt 18380 dst := v.Args[0] 18381 src := v.Args[1] 18382 mem := v.Args[2] 18383 if !(SizeAndAlign(s).Size() == 2) { 18384 break 18385 } 18386 v.reset(OpAMD64MOVWstore) 18387 v.AddArg(dst) 18388 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 18389 v0.AddArg(src) 18390 v0.AddArg(mem) 18391 v.AddArg(v0) 18392 v.AddArg(mem) 18393 return true 18394 } 18395 // match: (Move [s] dst src mem) 18396 // cond: SizeAndAlign(s).Size() == 4 18397 // result: (MOVLstore dst (MOVLload src mem) mem) 18398 for { 18399 s := v.AuxInt 18400 dst := v.Args[0] 18401 src := v.Args[1] 18402 mem := v.Args[2] 18403 if !(SizeAndAlign(s).Size() == 4) { 18404 break 18405 } 18406 v.reset(OpAMD64MOVLstore) 18407 v.AddArg(dst) 18408 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 18409 v0.AddArg(src) 18410 v0.AddArg(mem) 18411 v.AddArg(v0) 18412 v.AddArg(mem) 18413 return true 18414 } 18415 // match: (Move [s] dst src mem) 18416 // cond: SizeAndAlign(s).Size() == 8 18417 // result: (MOVQstore dst (MOVQload src mem) mem) 18418 for { 18419 s := v.AuxInt 18420 dst := v.Args[0] 18421 src := v.Args[1] 18422 mem := v.Args[2] 18423 if !(SizeAndAlign(s).Size() == 8) { 18424 break 18425 } 18426 v.reset(OpAMD64MOVQstore) 18427 v.AddArg(dst) 18428 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 18429 v0.AddArg(src) 18430 v0.AddArg(mem) 18431 v.AddArg(v0) 18432 v.AddArg(mem) 18433 return true 18434 } 18435 // match: (Move [s] dst src mem) 18436 // cond: SizeAndAlign(s).Size() == 16 18437 // result: (MOVOstore dst (MOVOload src mem) mem) 18438 for { 18439 s := v.AuxInt 18440 dst := v.Args[0] 18441 src := v.Args[1] 18442 mem := v.Args[2] 18443 if !(SizeAndAlign(s).Size() == 16) { 18444 break 18445 } 18446 v.reset(OpAMD64MOVOstore) 18447 v.AddArg(dst) 18448 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128) 18449 v0.AddArg(src) 18450 v0.AddArg(mem) 18451 v.AddArg(v0) 18452 v.AddArg(mem) 18453 return true 18454 } 18455 // match: (Move [s] dst src mem) 18456 // cond: SizeAndAlign(s).Size() == 3 18457 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 18458 for { 18459 s := v.AuxInt 18460 dst := v.Args[0] 18461 src := v.Args[1] 18462 mem := v.Args[2] 18463 if !(SizeAndAlign(s).Size() == 3) { 18464 break 18465 } 18466 v.reset(OpAMD64MOVBstore) 18467 v.AuxInt = 2 18468 v.AddArg(dst) 18469 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8()) 18470 v0.AuxInt = 2 18471 v0.AddArg(src) 18472 v0.AddArg(mem) 18473 v.AddArg(v0) 18474 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, TypeMem) 18475 v1.AddArg(dst) 18476 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 18477 v2.AddArg(src) 18478 v2.AddArg(mem) 18479 v1.AddArg(v2) 18480 v1.AddArg(mem) 18481 v.AddArg(v1) 18482 return true 18483 } 18484 // match: (Move [s] dst src mem) 18485 // cond: SizeAndAlign(s).Size() == 5 18486 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 18487 for { 18488 s := v.AuxInt 18489 dst := v.Args[0] 18490 src := v.Args[1] 18491 mem := v.Args[2] 18492 if !(SizeAndAlign(s).Size() == 5) { 18493 break 18494 } 18495 v.reset(OpAMD64MOVBstore) 18496 v.AuxInt = 4 18497 v.AddArg(dst) 18498 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8()) 18499 v0.AuxInt = 4 18500 v0.AddArg(src) 18501 v0.AddArg(mem) 18502 
v.AddArg(v0) 18503 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem) 18504 v1.AddArg(dst) 18505 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 18506 v2.AddArg(src) 18507 v2.AddArg(mem) 18508 v1.AddArg(v2) 18509 v1.AddArg(mem) 18510 v.AddArg(v1) 18511 return true 18512 } 18513 // match: (Move [s] dst src mem) 18514 // cond: SizeAndAlign(s).Size() == 6 18515 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 18516 for { 18517 s := v.AuxInt 18518 dst := v.Args[0] 18519 src := v.Args[1] 18520 mem := v.Args[2] 18521 if !(SizeAndAlign(s).Size() == 6) { 18522 break 18523 } 18524 v.reset(OpAMD64MOVWstore) 18525 v.AuxInt = 4 18526 v.AddArg(dst) 18527 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 18528 v0.AuxInt = 4 18529 v0.AddArg(src) 18530 v0.AddArg(mem) 18531 v.AddArg(v0) 18532 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem) 18533 v1.AddArg(dst) 18534 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 18535 v2.AddArg(src) 18536 v2.AddArg(mem) 18537 v1.AddArg(v2) 18538 v1.AddArg(mem) 18539 v.AddArg(v1) 18540 return true 18541 } 18542 // match: (Move [s] dst src mem) 18543 // cond: SizeAndAlign(s).Size() == 7 18544 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 18545 for { 18546 s := v.AuxInt 18547 dst := v.Args[0] 18548 src := v.Args[1] 18549 mem := v.Args[2] 18550 if !(SizeAndAlign(s).Size() == 7) { 18551 break 18552 } 18553 v.reset(OpAMD64MOVLstore) 18554 v.AuxInt = 3 18555 v.AddArg(dst) 18556 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 18557 v0.AuxInt = 3 18558 v0.AddArg(src) 18559 v0.AddArg(mem) 18560 v.AddArg(v0) 18561 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem) 18562 v1.AddArg(dst) 18563 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 18564 v2.AddArg(src) 18565 v2.AddArg(mem) 18566 v1.AddArg(v2) 18567 v1.AddArg(mem) 18568 v.AddArg(v1) 18569 return true 18570 } 18571 // match: (Move [s] dst src mem) 18572 // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 18573 // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 18574 for { 18575 s := v.AuxInt 18576 dst := v.Args[0] 18577 src := v.Args[1] 18578 mem := v.Args[2] 18579 if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) { 18580 break 18581 } 18582 v.reset(OpAMD64MOVQstore) 18583 v.AuxInt = SizeAndAlign(s).Size() - 8 18584 v.AddArg(dst) 18585 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 18586 v0.AuxInt = SizeAndAlign(s).Size() - 8 18587 v0.AddArg(src) 18588 v0.AddArg(mem) 18589 v.AddArg(v0) 18590 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem) 18591 v1.AddArg(dst) 18592 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 18593 v2.AddArg(src) 18594 v2.AddArg(mem) 18595 v1.AddArg(v2) 18596 v1.AddArg(mem) 18597 v.AddArg(v1) 18598 return true 18599 } 18600 // match: (Move [s] dst src mem) 18601 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 18602 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem)) 18603 for { 18604 s := v.AuxInt 18605 dst := v.Args[0] 18606 src := v.Args[1] 18607 mem := v.Args[2] 18608 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && 
SizeAndAlign(s).Size()%16 <= 8) { 18609 break 18610 } 18611 v.reset(OpMove) 18612 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 18613 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 18614 v0.AuxInt = SizeAndAlign(s).Size() % 16 18615 v0.AddArg(dst) 18616 v.AddArg(v0) 18617 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 18618 v1.AuxInt = SizeAndAlign(s).Size() % 16 18619 v1.AddArg(src) 18620 v.AddArg(v1) 18621 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem) 18622 v2.AddArg(dst) 18623 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 18624 v3.AddArg(src) 18625 v3.AddArg(mem) 18626 v2.AddArg(v3) 18627 v2.AddArg(mem) 18628 v.AddArg(v2) 18629 return true 18630 } 18631 // match: (Move [s] dst src mem) 18632 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 18633 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem)) 18634 for { 18635 s := v.AuxInt 18636 dst := v.Args[0] 18637 src := v.Args[1] 18638 mem := v.Args[2] 18639 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) { 18640 break 18641 } 18642 v.reset(OpMove) 18643 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 18644 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 18645 v0.AuxInt = SizeAndAlign(s).Size() % 16 18646 v0.AddArg(dst) 18647 v.AddArg(v0) 18648 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 18649 v1.AuxInt = SizeAndAlign(s).Size() % 16 18650 v1.AddArg(src) 18651 v.AddArg(v1) 18652 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, TypeMem) 18653 v2.AddArg(dst) 18654 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128) 18655 v3.AddArg(src) 18656 v3.AddArg(mem) 18657 v2.AddArg(v3) 18658 v2.AddArg(mem) 18659 v.AddArg(v2) 18660 return true 18661 } 18662 // match: (Move [s] dst src mem) 18663 // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 18664 // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem) 18665 for { 18666 s := v.AuxInt 18667 dst := v.Args[0] 18668 src := v.Args[1] 18669 mem := v.Args[2] 18670 if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 18671 break 18672 } 18673 v.reset(OpAMD64DUFFCOPY) 18674 v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16) 18675 v.AddArg(dst) 18676 v.AddArg(src) 18677 v.AddArg(mem) 18678 return true 18679 } 18680 // match: (Move [s] dst src mem) 18681 // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 18682 // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem) 18683 for { 18684 s := v.AuxInt 18685 dst := v.Args[0] 18686 src := v.Args[1] 18687 mem := v.Args[2] 18688 if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) { 18689 break 18690 } 18691 v.reset(OpAMD64REPMOVSQ) 18692 v.AddArg(dst) 18693 v.AddArg(src) 18694 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 18695 v0.AuxInt = SizeAndAlign(s).Size() / 8 18696 v.AddArg(v0) 18697 v.AddArg(mem) 18698 return true 18699 } 18700 return false 18701 } 18702 func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { 18703 b := v.Block 18704 _ = b 18705 // match: (Mul16 x y) 18706 // cond: 18707 // result: (MULL x y) 18708 for { 18709 x := v.Args[0] 18710 y := 
v.Args[1] 18711 v.reset(OpAMD64MULL) 18712 v.AddArg(x) 18713 v.AddArg(y) 18714 return true 18715 } 18716 } 18717 func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool { 18718 b := v.Block 18719 _ = b 18720 // match: (Mul32 x y) 18721 // cond: 18722 // result: (MULL x y) 18723 for { 18724 x := v.Args[0] 18725 y := v.Args[1] 18726 v.reset(OpAMD64MULL) 18727 v.AddArg(x) 18728 v.AddArg(y) 18729 return true 18730 } 18731 } 18732 func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool { 18733 b := v.Block 18734 _ = b 18735 // match: (Mul32F x y) 18736 // cond: 18737 // result: (MULSS x y) 18738 for { 18739 x := v.Args[0] 18740 y := v.Args[1] 18741 v.reset(OpAMD64MULSS) 18742 v.AddArg(x) 18743 v.AddArg(y) 18744 return true 18745 } 18746 } 18747 func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool { 18748 b := v.Block 18749 _ = b 18750 // match: (Mul64 x y) 18751 // cond: 18752 // result: (MULQ x y) 18753 for { 18754 x := v.Args[0] 18755 y := v.Args[1] 18756 v.reset(OpAMD64MULQ) 18757 v.AddArg(x) 18758 v.AddArg(y) 18759 return true 18760 } 18761 } 18762 func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool { 18763 b := v.Block 18764 _ = b 18765 // match: (Mul64F x y) 18766 // cond: 18767 // result: (MULSD x y) 18768 for { 18769 x := v.Args[0] 18770 y := v.Args[1] 18771 v.reset(OpAMD64MULSD) 18772 v.AddArg(x) 18773 v.AddArg(y) 18774 return true 18775 } 18776 } 18777 func rewriteValueAMD64_OpMul64uhilo(v *Value, config *Config) bool { 18778 b := v.Block 18779 _ = b 18780 // match: (Mul64uhilo x y) 18781 // cond: 18782 // result: (MULQU2 x y) 18783 for { 18784 x := v.Args[0] 18785 y := v.Args[1] 18786 v.reset(OpAMD64MULQU2) 18787 v.AddArg(x) 18788 v.AddArg(y) 18789 return true 18790 } 18791 } 18792 func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { 18793 b := v.Block 18794 _ = b 18795 // match: (Mul8 x y) 18796 // cond: 18797 // result: (MULL x y) 18798 for { 18799 x := v.Args[0] 18800 y := v.Args[1] 18801 v.reset(OpAMD64MULL) 18802 v.AddArg(x) 18803 v.AddArg(y) 18804 return true 18805 } 18806 } 18807 func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { 18808 b := v.Block 18809 _ = b 18810 // match: (Neg16 x) 18811 // cond: 18812 // result: (NEGL x) 18813 for { 18814 x := v.Args[0] 18815 v.reset(OpAMD64NEGL) 18816 v.AddArg(x) 18817 return true 18818 } 18819 } 18820 func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool { 18821 b := v.Block 18822 _ = b 18823 // match: (Neg32 x) 18824 // cond: 18825 // result: (NEGL x) 18826 for { 18827 x := v.Args[0] 18828 v.reset(OpAMD64NEGL) 18829 v.AddArg(x) 18830 return true 18831 } 18832 } 18833 func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool { 18834 b := v.Block 18835 _ = b 18836 // match: (Neg32F x) 18837 // cond: 18838 // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))])) 18839 for { 18840 x := v.Args[0] 18841 v.reset(OpAMD64PXOR) 18842 v.AddArg(x) 18843 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, config.Frontend().TypeFloat32()) 18844 v0.AuxInt = f2i(math.Copysign(0, -1)) 18845 v.AddArg(v0) 18846 return true 18847 } 18848 } 18849 func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool { 18850 b := v.Block 18851 _ = b 18852 // match: (Neg64 x) 18853 // cond: 18854 // result: (NEGQ x) 18855 for { 18856 x := v.Args[0] 18857 v.reset(OpAMD64NEGQ) 18858 v.AddArg(x) 18859 return true 18860 } 18861 } 18862 func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool { 18863 b := v.Block 18864 _ = b 18865 // match: (Neg64F x) 18866 // cond: 
18867 // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))])) 18868 for { 18869 x := v.Args[0] 18870 v.reset(OpAMD64PXOR) 18871 v.AddArg(x) 18872 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, config.Frontend().TypeFloat64()) 18873 v0.AuxInt = f2i(math.Copysign(0, -1)) 18874 v.AddArg(v0) 18875 return true 18876 } 18877 } 18878 func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { 18879 b := v.Block 18880 _ = b 18881 // match: (Neg8 x) 18882 // cond: 18883 // result: (NEGL x) 18884 for { 18885 x := v.Args[0] 18886 v.reset(OpAMD64NEGL) 18887 v.AddArg(x) 18888 return true 18889 } 18890 } 18891 func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool { 18892 b := v.Block 18893 _ = b 18894 // match: (Neq16 x y) 18895 // cond: 18896 // result: (SETNE (CMPW x y)) 18897 for { 18898 x := v.Args[0] 18899 y := v.Args[1] 18900 v.reset(OpAMD64SETNE) 18901 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 18902 v0.AddArg(x) 18903 v0.AddArg(y) 18904 v.AddArg(v0) 18905 return true 18906 } 18907 } 18908 func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool { 18909 b := v.Block 18910 _ = b 18911 // match: (Neq32 x y) 18912 // cond: 18913 // result: (SETNE (CMPL x y)) 18914 for { 18915 x := v.Args[0] 18916 y := v.Args[1] 18917 v.reset(OpAMD64SETNE) 18918 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 18919 v0.AddArg(x) 18920 v0.AddArg(y) 18921 v.AddArg(v0) 18922 return true 18923 } 18924 } 18925 func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool { 18926 b := v.Block 18927 _ = b 18928 // match: (Neq32F x y) 18929 // cond: 18930 // result: (SETNEF (UCOMISS x y)) 18931 for { 18932 x := v.Args[0] 18933 y := v.Args[1] 18934 v.reset(OpAMD64SETNEF) 18935 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 18936 v0.AddArg(x) 18937 v0.AddArg(y) 18938 v.AddArg(v0) 18939 return true 18940 } 18941 } 18942 func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool { 18943 b := v.Block 18944 _ = b 18945 // match: (Neq64 x y) 18946 // cond: 18947 // result: (SETNE (CMPQ x y)) 18948 for { 18949 x := v.Args[0] 18950 y := v.Args[1] 18951 v.reset(OpAMD64SETNE) 18952 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 18953 v0.AddArg(x) 18954 v0.AddArg(y) 18955 v.AddArg(v0) 18956 return true 18957 } 18958 } 18959 func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool { 18960 b := v.Block 18961 _ = b 18962 // match: (Neq64F x y) 18963 // cond: 18964 // result: (SETNEF (UCOMISD x y)) 18965 for { 18966 x := v.Args[0] 18967 y := v.Args[1] 18968 v.reset(OpAMD64SETNEF) 18969 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 18970 v0.AddArg(x) 18971 v0.AddArg(y) 18972 v.AddArg(v0) 18973 return true 18974 } 18975 } 18976 func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool { 18977 b := v.Block 18978 _ = b 18979 // match: (Neq8 x y) 18980 // cond: 18981 // result: (SETNE (CMPB x y)) 18982 for { 18983 x := v.Args[0] 18984 y := v.Args[1] 18985 v.reset(OpAMD64SETNE) 18986 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 18987 v0.AddArg(x) 18988 v0.AddArg(y) 18989 v.AddArg(v0) 18990 return true 18991 } 18992 } 18993 func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool { 18994 b := v.Block 18995 _ = b 18996 // match: (NeqB x y) 18997 // cond: 18998 // result: (SETNE (CMPB x y)) 18999 for { 19000 x := v.Args[0] 19001 y := v.Args[1] 19002 v.reset(OpAMD64SETNE) 19003 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 19004 v0.AddArg(x) 19005 v0.AddArg(y) 19006 v.AddArg(v0) 19007 return true 19008 } 19009 } 19010 func rewriteValueAMD64_OpNeqPtr(v 
*Value, config *Config) bool { 19011 b := v.Block 19012 _ = b 19013 // match: (NeqPtr x y) 19014 // cond: config.PtrSize == 8 19015 // result: (SETNE (CMPQ x y)) 19016 for { 19017 x := v.Args[0] 19018 y := v.Args[1] 19019 if !(config.PtrSize == 8) { 19020 break 19021 } 19022 v.reset(OpAMD64SETNE) 19023 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 19024 v0.AddArg(x) 19025 v0.AddArg(y) 19026 v.AddArg(v0) 19027 return true 19028 } 19029 // match: (NeqPtr x y) 19030 // cond: config.PtrSize == 4 19031 // result: (SETNE (CMPL x y)) 19032 for { 19033 x := v.Args[0] 19034 y := v.Args[1] 19035 if !(config.PtrSize == 4) { 19036 break 19037 } 19038 v.reset(OpAMD64SETNE) 19039 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 19040 v0.AddArg(x) 19041 v0.AddArg(y) 19042 v.AddArg(v0) 19043 return true 19044 } 19045 return false 19046 } 19047 func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool { 19048 b := v.Block 19049 _ = b 19050 // match: (NilCheck ptr mem) 19051 // cond: 19052 // result: (LoweredNilCheck ptr mem) 19053 for { 19054 ptr := v.Args[0] 19055 mem := v.Args[1] 19056 v.reset(OpAMD64LoweredNilCheck) 19057 v.AddArg(ptr) 19058 v.AddArg(mem) 19059 return true 19060 } 19061 } 19062 func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { 19063 b := v.Block 19064 _ = b 19065 // match: (Not x) 19066 // cond: 19067 // result: (XORLconst [1] x) 19068 for { 19069 x := v.Args[0] 19070 v.reset(OpAMD64XORLconst) 19071 v.AuxInt = 1 19072 v.AddArg(x) 19073 return true 19074 } 19075 } 19076 func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { 19077 b := v.Block 19078 _ = b 19079 // match: (OffPtr [off] ptr) 19080 // cond: config.PtrSize == 8 && is32Bit(off) 19081 // result: (ADDQconst [off] ptr) 19082 for { 19083 off := v.AuxInt 19084 ptr := v.Args[0] 19085 if !(config.PtrSize == 8 && is32Bit(off)) { 19086 break 19087 } 19088 v.reset(OpAMD64ADDQconst) 19089 v.AuxInt = off 19090 v.AddArg(ptr) 19091 return true 19092 } 19093 // match: (OffPtr [off] ptr) 19094 // cond: config.PtrSize == 8 19095 // result: (ADDQ (MOVQconst [off]) ptr) 19096 for { 19097 off := v.AuxInt 19098 ptr := v.Args[0] 19099 if !(config.PtrSize == 8) { 19100 break 19101 } 19102 v.reset(OpAMD64ADDQ) 19103 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 19104 v0.AuxInt = off 19105 v.AddArg(v0) 19106 v.AddArg(ptr) 19107 return true 19108 } 19109 // match: (OffPtr [off] ptr) 19110 // cond: config.PtrSize == 4 19111 // result: (ADDLconst [off] ptr) 19112 for { 19113 off := v.AuxInt 19114 ptr := v.Args[0] 19115 if !(config.PtrSize == 4) { 19116 break 19117 } 19118 v.reset(OpAMD64ADDLconst) 19119 v.AuxInt = off 19120 v.AddArg(ptr) 19121 return true 19122 } 19123 return false 19124 } 19125 func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { 19126 b := v.Block 19127 _ = b 19128 // match: (Or16 x y) 19129 // cond: 19130 // result: (ORL x y) 19131 for { 19132 x := v.Args[0] 19133 y := v.Args[1] 19134 v.reset(OpAMD64ORL) 19135 v.AddArg(x) 19136 v.AddArg(y) 19137 return true 19138 } 19139 } 19140 func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool { 19141 b := v.Block 19142 _ = b 19143 // match: (Or32 x y) 19144 // cond: 19145 // result: (ORL x y) 19146 for { 19147 x := v.Args[0] 19148 y := v.Args[1] 19149 v.reset(OpAMD64ORL) 19150 v.AddArg(x) 19151 v.AddArg(y) 19152 return true 19153 } 19154 } 19155 func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool { 19156 b := v.Block 19157 _ = b 19158 // match: (Or64 x y) 19159 // cond: 19160 // result: (ORQ x y) 19161 for { 19162 x 
:= v.Args[0] 19163 y := v.Args[1] 19164 v.reset(OpAMD64ORQ) 19165 v.AddArg(x) 19166 v.AddArg(y) 19167 return true 19168 } 19169 } 19170 func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { 19171 b := v.Block 19172 _ = b 19173 // match: (Or8 x y) 19174 // cond: 19175 // result: (ORL x y) 19176 for { 19177 x := v.Args[0] 19178 y := v.Args[1] 19179 v.reset(OpAMD64ORL) 19180 v.AddArg(x) 19181 v.AddArg(y) 19182 return true 19183 } 19184 } 19185 func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool { 19186 b := v.Block 19187 _ = b 19188 // match: (OrB x y) 19189 // cond: 19190 // result: (ORL x y) 19191 for { 19192 x := v.Args[0] 19193 y := v.Args[1] 19194 v.reset(OpAMD64ORL) 19195 v.AddArg(x) 19196 v.AddArg(y) 19197 return true 19198 } 19199 } 19200 func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { 19201 b := v.Block 19202 _ = b 19203 // match: (Rsh16Ux16 <t> x y) 19204 // cond: 19205 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 19206 for { 19207 t := v.Type 19208 x := v.Args[0] 19209 y := v.Args[1] 19210 v.reset(OpAMD64ANDL) 19211 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 19212 v0.AddArg(x) 19213 v0.AddArg(y) 19214 v.AddArg(v0) 19215 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19216 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19217 v2.AuxInt = 16 19218 v2.AddArg(y) 19219 v1.AddArg(v2) 19220 v.AddArg(v1) 19221 return true 19222 } 19223 } 19224 func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { 19225 b := v.Block 19226 _ = b 19227 // match: (Rsh16Ux32 <t> x y) 19228 // cond: 19229 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 19230 for { 19231 t := v.Type 19232 x := v.Args[0] 19233 y := v.Args[1] 19234 v.reset(OpAMD64ANDL) 19235 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 19236 v0.AddArg(x) 19237 v0.AddArg(y) 19238 v.AddArg(v0) 19239 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19240 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19241 v2.AuxInt = 16 19242 v2.AddArg(y) 19243 v1.AddArg(v2) 19244 v.AddArg(v1) 19245 return true 19246 } 19247 } 19248 func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool { 19249 b := v.Block 19250 _ = b 19251 // match: (Rsh16Ux64 <t> x y) 19252 // cond: 19253 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 19254 for { 19255 t := v.Type 19256 x := v.Args[0] 19257 y := v.Args[1] 19258 v.reset(OpAMD64ANDL) 19259 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 19260 v0.AddArg(x) 19261 v0.AddArg(y) 19262 v.AddArg(v0) 19263 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19264 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19265 v2.AuxInt = 16 19266 v2.AddArg(y) 19267 v1.AddArg(v2) 19268 v.AddArg(v1) 19269 return true 19270 } 19271 } 19272 func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool { 19273 b := v.Block 19274 _ = b 19275 // match: (Rsh16Ux8 <t> x y) 19276 // cond: 19277 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 19278 for { 19279 t := v.Type 19280 x := v.Args[0] 19281 y := v.Args[1] 19282 v.reset(OpAMD64ANDL) 19283 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 19284 v0.AddArg(x) 19285 v0.AddArg(y) 19286 v.AddArg(v0) 19287 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19288 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19289 v2.AuxInt = 16 19290 v2.AddArg(y) 19291 v1.AddArg(v2) 19292 v.AddArg(v1) 19293 return true 19294 } 19295 } 19296 func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { 19297 b := v.Block 19298 _ = b 19299 // match: 
(Rsh16x16 <t> x y) 19300 // cond: 19301 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) 19302 for { 19303 t := v.Type 19304 x := v.Args[0] 19305 y := v.Args[1] 19306 v.reset(OpAMD64SARW) 19307 v.Type = t 19308 v.AddArg(x) 19309 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19310 v0.AddArg(y) 19311 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19312 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19313 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19314 v3.AuxInt = 16 19315 v3.AddArg(y) 19316 v2.AddArg(v3) 19317 v1.AddArg(v2) 19318 v0.AddArg(v1) 19319 v.AddArg(v0) 19320 return true 19321 } 19322 } 19323 func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool { 19324 b := v.Block 19325 _ = b 19326 // match: (Rsh16x32 <t> x y) 19327 // cond: 19328 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) 19329 for { 19330 t := v.Type 19331 x := v.Args[0] 19332 y := v.Args[1] 19333 v.reset(OpAMD64SARW) 19334 v.Type = t 19335 v.AddArg(x) 19336 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19337 v0.AddArg(y) 19338 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19339 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19340 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19341 v3.AuxInt = 16 19342 v3.AddArg(y) 19343 v2.AddArg(v3) 19344 v1.AddArg(v2) 19345 v0.AddArg(v1) 19346 v.AddArg(v0) 19347 return true 19348 } 19349 } 19350 func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool { 19351 b := v.Block 19352 _ = b 19353 // match: (Rsh16x64 <t> x y) 19354 // cond: 19355 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) 19356 for { 19357 t := v.Type 19358 x := v.Args[0] 19359 y := v.Args[1] 19360 v.reset(OpAMD64SARW) 19361 v.Type = t 19362 v.AddArg(x) 19363 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 19364 v0.AddArg(y) 19365 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 19366 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 19367 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19368 v3.AuxInt = 16 19369 v3.AddArg(y) 19370 v2.AddArg(v3) 19371 v1.AddArg(v2) 19372 v0.AddArg(v1) 19373 v.AddArg(v0) 19374 return true 19375 } 19376 } 19377 func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { 19378 b := v.Block 19379 _ = b 19380 // match: (Rsh16x8 <t> x y) 19381 // cond: 19382 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) 19383 for { 19384 t := v.Type 19385 x := v.Args[0] 19386 y := v.Args[1] 19387 v.reset(OpAMD64SARW) 19388 v.Type = t 19389 v.AddArg(x) 19390 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19391 v0.AddArg(y) 19392 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19393 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19394 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19395 v3.AuxInt = 16 19396 v3.AddArg(y) 19397 v2.AddArg(v3) 19398 v1.AddArg(v2) 19399 v0.AddArg(v1) 19400 v.AddArg(v0) 19401 return true 19402 } 19403 } 19404 func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool { 19405 b := v.Block 19406 _ = b 19407 // match: (Rsh32Ux16 <t> x y) 19408 // cond: 19409 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 19410 for { 19411 t := v.Type 19412 x := v.Args[0] 19413 y := v.Args[1] 19414 v.reset(OpAMD64ANDL) 19415 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 19416 v0.AddArg(x) 19417 v0.AddArg(y) 19418 v.AddArg(v0) 19419 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19420 v2 := 
b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19421 v2.AuxInt = 32 19422 v2.AddArg(y) 19423 v1.AddArg(v2) 19424 v.AddArg(v1) 19425 return true 19426 } 19427 } 19428 func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool { 19429 b := v.Block 19430 _ = b 19431 // match: (Rsh32Ux32 <t> x y) 19432 // cond: 19433 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 19434 for { 19435 t := v.Type 19436 x := v.Args[0] 19437 y := v.Args[1] 19438 v.reset(OpAMD64ANDL) 19439 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 19440 v0.AddArg(x) 19441 v0.AddArg(y) 19442 v.AddArg(v0) 19443 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19444 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19445 v2.AuxInt = 32 19446 v2.AddArg(y) 19447 v1.AddArg(v2) 19448 v.AddArg(v1) 19449 return true 19450 } 19451 } 19452 func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool { 19453 b := v.Block 19454 _ = b 19455 // match: (Rsh32Ux64 <t> x y) 19456 // cond: 19457 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 19458 for { 19459 t := v.Type 19460 x := v.Args[0] 19461 y := v.Args[1] 19462 v.reset(OpAMD64ANDL) 19463 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 19464 v0.AddArg(x) 19465 v0.AddArg(y) 19466 v.AddArg(v0) 19467 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19468 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19469 v2.AuxInt = 32 19470 v2.AddArg(y) 19471 v1.AddArg(v2) 19472 v.AddArg(v1) 19473 return true 19474 } 19475 } 19476 func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool { 19477 b := v.Block 19478 _ = b 19479 // match: (Rsh32Ux8 <t> x y) 19480 // cond: 19481 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 19482 for { 19483 t := v.Type 19484 x := v.Args[0] 19485 y := v.Args[1] 19486 v.reset(OpAMD64ANDL) 19487 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 19488 v0.AddArg(x) 19489 v0.AddArg(y) 19490 v.AddArg(v0) 19491 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19492 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19493 v2.AuxInt = 32 19494 v2.AddArg(y) 19495 v1.AddArg(v2) 19496 v.AddArg(v1) 19497 return true 19498 } 19499 } 19500 func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { 19501 b := v.Block 19502 _ = b 19503 // match: (Rsh32x16 <t> x y) 19504 // cond: 19505 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 19506 for { 19507 t := v.Type 19508 x := v.Args[0] 19509 y := v.Args[1] 19510 v.reset(OpAMD64SARL) 19511 v.Type = t 19512 v.AddArg(x) 19513 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19514 v0.AddArg(y) 19515 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19516 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19517 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19518 v3.AuxInt = 32 19519 v3.AddArg(y) 19520 v2.AddArg(v3) 19521 v1.AddArg(v2) 19522 v0.AddArg(v1) 19523 v.AddArg(v0) 19524 return true 19525 } 19526 } 19527 func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool { 19528 b := v.Block 19529 _ = b 19530 // match: (Rsh32x32 <t> x y) 19531 // cond: 19532 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 19533 for { 19534 t := v.Type 19535 x := v.Args[0] 19536 y := v.Args[1] 19537 v.reset(OpAMD64SARL) 19538 v.Type = t 19539 v.AddArg(x) 19540 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19541 v0.AddArg(y) 19542 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19543 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19544 v3 := 
b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19545 v3.AuxInt = 32 19546 v3.AddArg(y) 19547 v2.AddArg(v3) 19548 v1.AddArg(v2) 19549 v0.AddArg(v1) 19550 v.AddArg(v0) 19551 return true 19552 } 19553 } 19554 func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool { 19555 b := v.Block 19556 _ = b 19557 // match: (Rsh32x64 <t> x y) 19558 // cond: 19559 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 19560 for { 19561 t := v.Type 19562 x := v.Args[0] 19563 y := v.Args[1] 19564 v.reset(OpAMD64SARL) 19565 v.Type = t 19566 v.AddArg(x) 19567 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 19568 v0.AddArg(y) 19569 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 19570 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 19571 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19572 v3.AuxInt = 32 19573 v3.AddArg(y) 19574 v2.AddArg(v3) 19575 v1.AddArg(v2) 19576 v0.AddArg(v1) 19577 v.AddArg(v0) 19578 return true 19579 } 19580 } 19581 func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool { 19582 b := v.Block 19583 _ = b 19584 // match: (Rsh32x8 <t> x y) 19585 // cond: 19586 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 19587 for { 19588 t := v.Type 19589 x := v.Args[0] 19590 y := v.Args[1] 19591 v.reset(OpAMD64SARL) 19592 v.Type = t 19593 v.AddArg(x) 19594 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19595 v0.AddArg(y) 19596 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19597 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19598 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19599 v3.AuxInt = 32 19600 v3.AddArg(y) 19601 v2.AddArg(v3) 19602 v1.AddArg(v2) 19603 v0.AddArg(v1) 19604 v.AddArg(v0) 19605 return true 19606 } 19607 } 19608 func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool { 19609 b := v.Block 19610 _ = b 19611 // match: (Rsh64Ux16 <t> x y) 19612 // cond: 19613 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 19614 for { 19615 t := v.Type 19616 x := v.Args[0] 19617 y := v.Args[1] 19618 v.reset(OpAMD64ANDQ) 19619 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 19620 v0.AddArg(x) 19621 v0.AddArg(y) 19622 v.AddArg(v0) 19623 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 19624 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19625 v2.AuxInt = 64 19626 v2.AddArg(y) 19627 v1.AddArg(v2) 19628 v.AddArg(v1) 19629 return true 19630 } 19631 } 19632 func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool { 19633 b := v.Block 19634 _ = b 19635 // match: (Rsh64Ux32 <t> x y) 19636 // cond: 19637 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 19638 for { 19639 t := v.Type 19640 x := v.Args[0] 19641 y := v.Args[1] 19642 v.reset(OpAMD64ANDQ) 19643 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 19644 v0.AddArg(x) 19645 v0.AddArg(y) 19646 v.AddArg(v0) 19647 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 19648 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19649 v2.AuxInt = 64 19650 v2.AddArg(y) 19651 v1.AddArg(v2) 19652 v.AddArg(v1) 19653 return true 19654 } 19655 } 19656 func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool { 19657 b := v.Block 19658 _ = b 19659 // match: (Rsh64Ux64 <t> x y) 19660 // cond: 19661 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 19662 for { 19663 t := v.Type 19664 x := v.Args[0] 19665 y := v.Args[1] 19666 v.reset(OpAMD64ANDQ) 19667 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 19668 v0.AddArg(x) 19669 v0.AddArg(y) 19670 v.AddArg(v0) 
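// The SBBcarrymask values built below implement Go's shift semantics in branch-free
// form: CMPQconst y [64] sets the carry flag exactly when y < 64 (unsigned),
// SBBQcarrymask turns that flag into an all-ones or all-zero mask, and ANDing the
// mask with the SHRQ result forces logical shifts by 64 or more to produce zero.
// The signed variants (SARW/SARL/SARQ) instead OR the inverted mask into the shift
// count, replacing out-of-range counts with all ones so the arithmetic shift fills
// the result with copies of the sign bit.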
19671 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 19672 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19673 v2.AuxInt = 64 19674 v2.AddArg(y) 19675 v1.AddArg(v2) 19676 v.AddArg(v1) 19677 return true 19678 } 19679 } 19680 func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool { 19681 b := v.Block 19682 _ = b 19683 // match: (Rsh64Ux8 <t> x y) 19684 // cond: 19685 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 19686 for { 19687 t := v.Type 19688 x := v.Args[0] 19689 y := v.Args[1] 19690 v.reset(OpAMD64ANDQ) 19691 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 19692 v0.AddArg(x) 19693 v0.AddArg(y) 19694 v.AddArg(v0) 19695 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 19696 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19697 v2.AuxInt = 64 19698 v2.AddArg(y) 19699 v1.AddArg(v2) 19700 v.AddArg(v1) 19701 return true 19702 } 19703 } 19704 func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { 19705 b := v.Block 19706 _ = b 19707 // match: (Rsh64x16 <t> x y) 19708 // cond: 19709 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 19710 for { 19711 t := v.Type 19712 x := v.Args[0] 19713 y := v.Args[1] 19714 v.reset(OpAMD64SARQ) 19715 v.Type = t 19716 v.AddArg(x) 19717 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19718 v0.AddArg(y) 19719 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19720 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19721 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19722 v3.AuxInt = 64 19723 v3.AddArg(y) 19724 v2.AddArg(v3) 19725 v1.AddArg(v2) 19726 v0.AddArg(v1) 19727 v.AddArg(v0) 19728 return true 19729 } 19730 } 19731 func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool { 19732 b := v.Block 19733 _ = b 19734 // match: (Rsh64x32 <t> x y) 19735 // cond: 19736 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 19737 for { 19738 t := v.Type 19739 x := v.Args[0] 19740 y := v.Args[1] 19741 v.reset(OpAMD64SARQ) 19742 v.Type = t 19743 v.AddArg(x) 19744 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19745 v0.AddArg(y) 19746 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19747 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19748 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19749 v3.AuxInt = 64 19750 v3.AddArg(y) 19751 v2.AddArg(v3) 19752 v1.AddArg(v2) 19753 v0.AddArg(v1) 19754 v.AddArg(v0) 19755 return true 19756 } 19757 } 19758 func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool { 19759 b := v.Block 19760 _ = b 19761 // match: (Rsh64x64 <t> x y) 19762 // cond: 19763 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 19764 for { 19765 t := v.Type 19766 x := v.Args[0] 19767 y := v.Args[1] 19768 v.reset(OpAMD64SARQ) 19769 v.Type = t 19770 v.AddArg(x) 19771 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 19772 v0.AddArg(y) 19773 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 19774 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 19775 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19776 v3.AuxInt = 64 19777 v3.AddArg(y) 19778 v2.AddArg(v3) 19779 v1.AddArg(v2) 19780 v0.AddArg(v1) 19781 v.AddArg(v0) 19782 return true 19783 } 19784 } 19785 func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { 19786 b := v.Block 19787 _ = b 19788 // match: (Rsh64x8 <t> x y) 19789 // cond: 19790 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 19791 for { 19792 t := v.Type 
19793 x := v.Args[0] 19794 y := v.Args[1] 19795 v.reset(OpAMD64SARQ) 19796 v.Type = t 19797 v.AddArg(x) 19798 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19799 v0.AddArg(y) 19800 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19801 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19802 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19803 v3.AuxInt = 64 19804 v3.AddArg(y) 19805 v2.AddArg(v3) 19806 v1.AddArg(v2) 19807 v0.AddArg(v1) 19808 v.AddArg(v0) 19809 return true 19810 } 19811 } 19812 func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool { 19813 b := v.Block 19814 _ = b 19815 // match: (Rsh8Ux16 <t> x y) 19816 // cond: 19817 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 19818 for { 19819 t := v.Type 19820 x := v.Args[0] 19821 y := v.Args[1] 19822 v.reset(OpAMD64ANDL) 19823 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 19824 v0.AddArg(x) 19825 v0.AddArg(y) 19826 v.AddArg(v0) 19827 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19828 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19829 v2.AuxInt = 8 19830 v2.AddArg(y) 19831 v1.AddArg(v2) 19832 v.AddArg(v1) 19833 return true 19834 } 19835 } 19836 func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool { 19837 b := v.Block 19838 _ = b 19839 // match: (Rsh8Ux32 <t> x y) 19840 // cond: 19841 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 19842 for { 19843 t := v.Type 19844 x := v.Args[0] 19845 y := v.Args[1] 19846 v.reset(OpAMD64ANDL) 19847 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 19848 v0.AddArg(x) 19849 v0.AddArg(y) 19850 v.AddArg(v0) 19851 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19852 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19853 v2.AuxInt = 8 19854 v2.AddArg(y) 19855 v1.AddArg(v2) 19856 v.AddArg(v1) 19857 return true 19858 } 19859 } 19860 func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool { 19861 b := v.Block 19862 _ = b 19863 // match: (Rsh8Ux64 <t> x y) 19864 // cond: 19865 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 19866 for { 19867 t := v.Type 19868 x := v.Args[0] 19869 y := v.Args[1] 19870 v.reset(OpAMD64ANDL) 19871 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 19872 v0.AddArg(x) 19873 v0.AddArg(y) 19874 v.AddArg(v0) 19875 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19876 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19877 v2.AuxInt = 8 19878 v2.AddArg(y) 19879 v1.AddArg(v2) 19880 v.AddArg(v1) 19881 return true 19882 } 19883 } 19884 func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool { 19885 b := v.Block 19886 _ = b 19887 // match: (Rsh8Ux8 <t> x y) 19888 // cond: 19889 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 19890 for { 19891 t := v.Type 19892 x := v.Args[0] 19893 y := v.Args[1] 19894 v.reset(OpAMD64ANDL) 19895 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 19896 v0.AddArg(x) 19897 v0.AddArg(y) 19898 v.AddArg(v0) 19899 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19900 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19901 v2.AuxInt = 8 19902 v2.AddArg(y) 19903 v1.AddArg(v2) 19904 v.AddArg(v1) 19905 return true 19906 } 19907 } 19908 func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { 19909 b := v.Block 19910 _ = b 19911 // match: (Rsh8x16 <t> x y) 19912 // cond: 19913 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 19914 for { 19915 t := v.Type 19916 x := v.Args[0] 19917 y := v.Args[1] 19918 v.reset(OpAMD64SARB) 19919 v.Type = t 19920 v.AddArg(x) 19921 v0 := 
b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19922 v0.AddArg(y) 19923 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19924 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19925 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19926 v3.AuxInt = 8 19927 v3.AddArg(y) 19928 v2.AddArg(v3) 19929 v1.AddArg(v2) 19930 v0.AddArg(v1) 19931 v.AddArg(v0) 19932 return true 19933 } 19934 } 19935 func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool { 19936 b := v.Block 19937 _ = b 19938 // match: (Rsh8x32 <t> x y) 19939 // cond: 19940 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 19941 for { 19942 t := v.Type 19943 x := v.Args[0] 19944 y := v.Args[1] 19945 v.reset(OpAMD64SARB) 19946 v.Type = t 19947 v.AddArg(x) 19948 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19949 v0.AddArg(y) 19950 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19951 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19952 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19953 v3.AuxInt = 8 19954 v3.AddArg(y) 19955 v2.AddArg(v3) 19956 v1.AddArg(v2) 19957 v0.AddArg(v1) 19958 v.AddArg(v0) 19959 return true 19960 } 19961 } 19962 func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool { 19963 b := v.Block 19964 _ = b 19965 // match: (Rsh8x64 <t> x y) 19966 // cond: 19967 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 19968 for { 19969 t := v.Type 19970 x := v.Args[0] 19971 y := v.Args[1] 19972 v.reset(OpAMD64SARB) 19973 v.Type = t 19974 v.AddArg(x) 19975 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 19976 v0.AddArg(y) 19977 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 19978 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 19979 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19980 v3.AuxInt = 8 19981 v3.AddArg(y) 19982 v2.AddArg(v3) 19983 v1.AddArg(v2) 19984 v0.AddArg(v1) 19985 v.AddArg(v0) 19986 return true 19987 } 19988 } 19989 func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool { 19990 b := v.Block 19991 _ = b 19992 // match: (Rsh8x8 <t> x y) 19993 // cond: 19994 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 19995 for { 19996 t := v.Type 19997 x := v.Args[0] 19998 y := v.Args[1] 19999 v.reset(OpAMD64SARB) 20000 v.Type = t 20001 v.AddArg(x) 20002 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20003 v0.AddArg(y) 20004 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20005 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20006 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 20007 v3.AuxInt = 8 20008 v3.AddArg(y) 20009 v2.AddArg(v3) 20010 v1.AddArg(v2) 20011 v0.AddArg(v1) 20012 v.AddArg(v0) 20013 return true 20014 } 20015 } 20016 func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool { 20017 b := v.Block 20018 _ = b 20019 // match: (Select0 <t> (AddTupleFirst32 tuple val)) 20020 // cond: 20021 // result: (ADDL val (Select0 <t> tuple)) 20022 for { 20023 t := v.Type 20024 v_0 := v.Args[0] 20025 if v_0.Op != OpAMD64AddTupleFirst32 { 20026 break 20027 } 20028 tuple := v_0.Args[0] 20029 val := v_0.Args[1] 20030 v.reset(OpAMD64ADDL) 20031 v.AddArg(val) 20032 v0 := b.NewValue0(v.Pos, OpSelect0, t) 20033 v0.AddArg(tuple) 20034 v.AddArg(v0) 20035 return true 20036 } 20037 // match: (Select0 <t> (AddTupleFirst64 tuple val)) 20038 // cond: 20039 // result: (ADDQ val (Select0 <t> tuple)) 20040 for { 20041 t := v.Type 20042 v_0 := v.Args[0] 20043 if v_0.Op != OpAMD64AddTupleFirst64 { 20044 break 20045 } 20046 tuple := 
v_0.Args[0] 20047 val := v_0.Args[1] 20048 v.reset(OpAMD64ADDQ) 20049 v.AddArg(val) 20050 v0 := b.NewValue0(v.Pos, OpSelect0, t) 20051 v0.AddArg(tuple) 20052 v.AddArg(v0) 20053 return true 20054 } 20055 return false 20056 } 20057 func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool { 20058 b := v.Block 20059 _ = b 20060 // match: (Select1 (AddTupleFirst32 tuple _ )) 20061 // cond: 20062 // result: (Select1 tuple) 20063 for { 20064 v_0 := v.Args[0] 20065 if v_0.Op != OpAMD64AddTupleFirst32 { 20066 break 20067 } 20068 tuple := v_0.Args[0] 20069 v.reset(OpSelect1) 20070 v.AddArg(tuple) 20071 return true 20072 } 20073 // match: (Select1 (AddTupleFirst64 tuple _ )) 20074 // cond: 20075 // result: (Select1 tuple) 20076 for { 20077 v_0 := v.Args[0] 20078 if v_0.Op != OpAMD64AddTupleFirst64 { 20079 break 20080 } 20081 tuple := v_0.Args[0] 20082 v.reset(OpSelect1) 20083 v.AddArg(tuple) 20084 return true 20085 } 20086 return false 20087 } 20088 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool { 20089 b := v.Block 20090 _ = b 20091 // match: (SignExt16to32 x) 20092 // cond: 20093 // result: (MOVWQSX x) 20094 for { 20095 x := v.Args[0] 20096 v.reset(OpAMD64MOVWQSX) 20097 v.AddArg(x) 20098 return true 20099 } 20100 } 20101 func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool { 20102 b := v.Block 20103 _ = b 20104 // match: (SignExt16to64 x) 20105 // cond: 20106 // result: (MOVWQSX x) 20107 for { 20108 x := v.Args[0] 20109 v.reset(OpAMD64MOVWQSX) 20110 v.AddArg(x) 20111 return true 20112 } 20113 } 20114 func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool { 20115 b := v.Block 20116 _ = b 20117 // match: (SignExt32to64 x) 20118 // cond: 20119 // result: (MOVLQSX x) 20120 for { 20121 x := v.Args[0] 20122 v.reset(OpAMD64MOVLQSX) 20123 v.AddArg(x) 20124 return true 20125 } 20126 } 20127 func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool { 20128 b := v.Block 20129 _ = b 20130 // match: (SignExt8to16 x) 20131 // cond: 20132 // result: (MOVBQSX x) 20133 for { 20134 x := v.Args[0] 20135 v.reset(OpAMD64MOVBQSX) 20136 v.AddArg(x) 20137 return true 20138 } 20139 } 20140 func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool { 20141 b := v.Block 20142 _ = b 20143 // match: (SignExt8to32 x) 20144 // cond: 20145 // result: (MOVBQSX x) 20146 for { 20147 x := v.Args[0] 20148 v.reset(OpAMD64MOVBQSX) 20149 v.AddArg(x) 20150 return true 20151 } 20152 } 20153 func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool { 20154 b := v.Block 20155 _ = b 20156 // match: (SignExt8to64 x) 20157 // cond: 20158 // result: (MOVBQSX x) 20159 for { 20160 x := v.Args[0] 20161 v.reset(OpAMD64MOVBQSX) 20162 v.AddArg(x) 20163 return true 20164 } 20165 } 20166 func rewriteValueAMD64_OpSlicemask(v *Value, config *Config) bool { 20167 b := v.Block 20168 _ = b 20169 // match: (Slicemask <t> x) 20170 // cond: 20171 // result: (SARQconst (NEGQ <t> x) [63]) 20172 for { 20173 t := v.Type 20174 x := v.Args[0] 20175 v.reset(OpAMD64SARQconst) 20176 v.AuxInt = 63 20177 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 20178 v0.AddArg(x) 20179 v.AddArg(v0) 20180 return true 20181 } 20182 } 20183 func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool { 20184 b := v.Block 20185 _ = b 20186 // match: (Sqrt x) 20187 // cond: 20188 // result: (SQRTSD x) 20189 for { 20190 x := v.Args[0] 20191 v.reset(OpAMD64SQRTSD) 20192 v.AddArg(x) 20193 return true 20194 } 20195 } 20196 func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool { 20197 b := 
v.Block 20198 _ = b 20199 // match: (StaticCall [argwid] {target} mem) 20200 // cond: 20201 // result: (CALLstatic [argwid] {target} mem) 20202 for { 20203 argwid := v.AuxInt 20204 target := v.Aux 20205 mem := v.Args[0] 20206 v.reset(OpAMD64CALLstatic) 20207 v.AuxInt = argwid 20208 v.Aux = target 20209 v.AddArg(mem) 20210 return true 20211 } 20212 } 20213 func rewriteValueAMD64_OpStore(v *Value, config *Config) bool { 20214 b := v.Block 20215 _ = b 20216 // match: (Store [8] ptr val mem) 20217 // cond: is64BitFloat(val.Type) 20218 // result: (MOVSDstore ptr val mem) 20219 for { 20220 if v.AuxInt != 8 { 20221 break 20222 } 20223 ptr := v.Args[0] 20224 val := v.Args[1] 20225 mem := v.Args[2] 20226 if !(is64BitFloat(val.Type)) { 20227 break 20228 } 20229 v.reset(OpAMD64MOVSDstore) 20230 v.AddArg(ptr) 20231 v.AddArg(val) 20232 v.AddArg(mem) 20233 return true 20234 } 20235 // match: (Store [4] ptr val mem) 20236 // cond: is32BitFloat(val.Type) 20237 // result: (MOVSSstore ptr val mem) 20238 for { 20239 if v.AuxInt != 4 { 20240 break 20241 } 20242 ptr := v.Args[0] 20243 val := v.Args[1] 20244 mem := v.Args[2] 20245 if !(is32BitFloat(val.Type)) { 20246 break 20247 } 20248 v.reset(OpAMD64MOVSSstore) 20249 v.AddArg(ptr) 20250 v.AddArg(val) 20251 v.AddArg(mem) 20252 return true 20253 } 20254 // match: (Store [8] ptr val mem) 20255 // cond: 20256 // result: (MOVQstore ptr val mem) 20257 for { 20258 if v.AuxInt != 8 { 20259 break 20260 } 20261 ptr := v.Args[0] 20262 val := v.Args[1] 20263 mem := v.Args[2] 20264 v.reset(OpAMD64MOVQstore) 20265 v.AddArg(ptr) 20266 v.AddArg(val) 20267 v.AddArg(mem) 20268 return true 20269 } 20270 // match: (Store [4] ptr val mem) 20271 // cond: 20272 // result: (MOVLstore ptr val mem) 20273 for { 20274 if v.AuxInt != 4 { 20275 break 20276 } 20277 ptr := v.Args[0] 20278 val := v.Args[1] 20279 mem := v.Args[2] 20280 v.reset(OpAMD64MOVLstore) 20281 v.AddArg(ptr) 20282 v.AddArg(val) 20283 v.AddArg(mem) 20284 return true 20285 } 20286 // match: (Store [2] ptr val mem) 20287 // cond: 20288 // result: (MOVWstore ptr val mem) 20289 for { 20290 if v.AuxInt != 2 { 20291 break 20292 } 20293 ptr := v.Args[0] 20294 val := v.Args[1] 20295 mem := v.Args[2] 20296 v.reset(OpAMD64MOVWstore) 20297 v.AddArg(ptr) 20298 v.AddArg(val) 20299 v.AddArg(mem) 20300 return true 20301 } 20302 // match: (Store [1] ptr val mem) 20303 // cond: 20304 // result: (MOVBstore ptr val mem) 20305 for { 20306 if v.AuxInt != 1 { 20307 break 20308 } 20309 ptr := v.Args[0] 20310 val := v.Args[1] 20311 mem := v.Args[2] 20312 v.reset(OpAMD64MOVBstore) 20313 v.AddArg(ptr) 20314 v.AddArg(val) 20315 v.AddArg(mem) 20316 return true 20317 } 20318 return false 20319 } 20320 func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool { 20321 b := v.Block 20322 _ = b 20323 // match: (Sub16 x y) 20324 // cond: 20325 // result: (SUBL x y) 20326 for { 20327 x := v.Args[0] 20328 y := v.Args[1] 20329 v.reset(OpAMD64SUBL) 20330 v.AddArg(x) 20331 v.AddArg(y) 20332 return true 20333 } 20334 } 20335 func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool { 20336 b := v.Block 20337 _ = b 20338 // match: (Sub32 x y) 20339 // cond: 20340 // result: (SUBL x y) 20341 for { 20342 x := v.Args[0] 20343 y := v.Args[1] 20344 v.reset(OpAMD64SUBL) 20345 v.AddArg(x) 20346 v.AddArg(y) 20347 return true 20348 } 20349 } 20350 func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool { 20351 b := v.Block 20352 _ = b 20353 // match: (Sub32F x y) 20354 // cond: 20355 // result: (SUBSS x y) 20356 for { 20357 x := v.Args[0] 20358 y := 
v.Args[1] 20359 v.reset(OpAMD64SUBSS) 20360 v.AddArg(x) 20361 v.AddArg(y) 20362 return true 20363 } 20364 } 20365 func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool { 20366 b := v.Block 20367 _ = b 20368 // match: (Sub64 x y) 20369 // cond: 20370 // result: (SUBQ x y) 20371 for { 20372 x := v.Args[0] 20373 y := v.Args[1] 20374 v.reset(OpAMD64SUBQ) 20375 v.AddArg(x) 20376 v.AddArg(y) 20377 return true 20378 } 20379 } 20380 func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool { 20381 b := v.Block 20382 _ = b 20383 // match: (Sub64F x y) 20384 // cond: 20385 // result: (SUBSD x y) 20386 for { 20387 x := v.Args[0] 20388 y := v.Args[1] 20389 v.reset(OpAMD64SUBSD) 20390 v.AddArg(x) 20391 v.AddArg(y) 20392 return true 20393 } 20394 } 20395 func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool { 20396 b := v.Block 20397 _ = b 20398 // match: (Sub8 x y) 20399 // cond: 20400 // result: (SUBL x y) 20401 for { 20402 x := v.Args[0] 20403 y := v.Args[1] 20404 v.reset(OpAMD64SUBL) 20405 v.AddArg(x) 20406 v.AddArg(y) 20407 return true 20408 } 20409 } 20410 func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool { 20411 b := v.Block 20412 _ = b 20413 // match: (SubPtr x y) 20414 // cond: config.PtrSize == 8 20415 // result: (SUBQ x y) 20416 for { 20417 x := v.Args[0] 20418 y := v.Args[1] 20419 if !(config.PtrSize == 8) { 20420 break 20421 } 20422 v.reset(OpAMD64SUBQ) 20423 v.AddArg(x) 20424 v.AddArg(y) 20425 return true 20426 } 20427 // match: (SubPtr x y) 20428 // cond: config.PtrSize == 4 20429 // result: (SUBL x y) 20430 for { 20431 x := v.Args[0] 20432 y := v.Args[1] 20433 if !(config.PtrSize == 4) { 20434 break 20435 } 20436 v.reset(OpAMD64SUBL) 20437 v.AddArg(x) 20438 v.AddArg(y) 20439 return true 20440 } 20441 return false 20442 } 20443 func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool { 20444 b := v.Block 20445 _ = b 20446 // match: (Trunc16to8 x) 20447 // cond: 20448 // result: x 20449 for { 20450 x := v.Args[0] 20451 v.reset(OpCopy) 20452 v.Type = x.Type 20453 v.AddArg(x) 20454 return true 20455 } 20456 } 20457 func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool { 20458 b := v.Block 20459 _ = b 20460 // match: (Trunc32to16 x) 20461 // cond: 20462 // result: x 20463 for { 20464 x := v.Args[0] 20465 v.reset(OpCopy) 20466 v.Type = x.Type 20467 v.AddArg(x) 20468 return true 20469 } 20470 } 20471 func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool { 20472 b := v.Block 20473 _ = b 20474 // match: (Trunc32to8 x) 20475 // cond: 20476 // result: x 20477 for { 20478 x := v.Args[0] 20479 v.reset(OpCopy) 20480 v.Type = x.Type 20481 v.AddArg(x) 20482 return true 20483 } 20484 } 20485 func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool { 20486 b := v.Block 20487 _ = b 20488 // match: (Trunc64to16 x) 20489 // cond: 20490 // result: x 20491 for { 20492 x := v.Args[0] 20493 v.reset(OpCopy) 20494 v.Type = x.Type 20495 v.AddArg(x) 20496 return true 20497 } 20498 } 20499 func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool { 20500 b := v.Block 20501 _ = b 20502 // match: (Trunc64to32 x) 20503 // cond: 20504 // result: x 20505 for { 20506 x := v.Args[0] 20507 v.reset(OpCopy) 20508 v.Type = x.Type 20509 v.AddArg(x) 20510 return true 20511 } 20512 } 20513 func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool { 20514 b := v.Block 20515 _ = b 20516 // match: (Trunc64to8 x) 20517 // cond: 20518 // result: x 20519 for { 20520 x := v.Args[0] 20521 v.reset(OpCopy) 20522 v.Type = x.Type 20523 
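// The Sub* rules above map 8-, 16- and 32-bit subtraction onto SUBL and the
// 64-bit form onto SUBQ, with SubPtr choosing between them via config.PtrSize
// so the same rule set covers both pointer widths. The Trunc* rules emit no
// instruction at all: on AMD64 a truncation is a pure re-typing, so the value
// is rewritten to an OpCopy of its wider source and narrower users simply
// read the low bits of the same register. A minimal sketch of that idea,
// using a hypothetical helper name:
func sketchTrunc64to8(x uint64) uint8 {
	return uint8(x) // no code is generated for this: the low byte is already in place
}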
v.AddArg(x) 20524 return true 20525 } 20526 } 20527 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool { 20528 b := v.Block 20529 _ = b 20530 // match: (Xor16 x y) 20531 // cond: 20532 // result: (XORL x y) 20533 for { 20534 x := v.Args[0] 20535 y := v.Args[1] 20536 v.reset(OpAMD64XORL) 20537 v.AddArg(x) 20538 v.AddArg(y) 20539 return true 20540 } 20541 } 20542 func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool { 20543 b := v.Block 20544 _ = b 20545 // match: (Xor32 x y) 20546 // cond: 20547 // result: (XORL x y) 20548 for { 20549 x := v.Args[0] 20550 y := v.Args[1] 20551 v.reset(OpAMD64XORL) 20552 v.AddArg(x) 20553 v.AddArg(y) 20554 return true 20555 } 20556 } 20557 func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool { 20558 b := v.Block 20559 _ = b 20560 // match: (Xor64 x y) 20561 // cond: 20562 // result: (XORQ x y) 20563 for { 20564 x := v.Args[0] 20565 y := v.Args[1] 20566 v.reset(OpAMD64XORQ) 20567 v.AddArg(x) 20568 v.AddArg(y) 20569 return true 20570 } 20571 } 20572 func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool { 20573 b := v.Block 20574 _ = b 20575 // match: (Xor8 x y) 20576 // cond: 20577 // result: (XORL x y) 20578 for { 20579 x := v.Args[0] 20580 y := v.Args[1] 20581 v.reset(OpAMD64XORL) 20582 v.AddArg(x) 20583 v.AddArg(y) 20584 return true 20585 } 20586 } 20587 func rewriteValueAMD64_OpZero(v *Value, config *Config) bool { 20588 b := v.Block 20589 _ = b 20590 // match: (Zero [s] _ mem) 20591 // cond: SizeAndAlign(s).Size() == 0 20592 // result: mem 20593 for { 20594 s := v.AuxInt 20595 mem := v.Args[1] 20596 if !(SizeAndAlign(s).Size() == 0) { 20597 break 20598 } 20599 v.reset(OpCopy) 20600 v.Type = mem.Type 20601 v.AddArg(mem) 20602 return true 20603 } 20604 // match: (Zero [s] destptr mem) 20605 // cond: SizeAndAlign(s).Size() == 1 20606 // result: (MOVBstoreconst [0] destptr mem) 20607 for { 20608 s := v.AuxInt 20609 destptr := v.Args[0] 20610 mem := v.Args[1] 20611 if !(SizeAndAlign(s).Size() == 1) { 20612 break 20613 } 20614 v.reset(OpAMD64MOVBstoreconst) 20615 v.AuxInt = 0 20616 v.AddArg(destptr) 20617 v.AddArg(mem) 20618 return true 20619 } 20620 // match: (Zero [s] destptr mem) 20621 // cond: SizeAndAlign(s).Size() == 2 20622 // result: (MOVWstoreconst [0] destptr mem) 20623 for { 20624 s := v.AuxInt 20625 destptr := v.Args[0] 20626 mem := v.Args[1] 20627 if !(SizeAndAlign(s).Size() == 2) { 20628 break 20629 } 20630 v.reset(OpAMD64MOVWstoreconst) 20631 v.AuxInt = 0 20632 v.AddArg(destptr) 20633 v.AddArg(mem) 20634 return true 20635 } 20636 // match: (Zero [s] destptr mem) 20637 // cond: SizeAndAlign(s).Size() == 4 20638 // result: (MOVLstoreconst [0] destptr mem) 20639 for { 20640 s := v.AuxInt 20641 destptr := v.Args[0] 20642 mem := v.Args[1] 20643 if !(SizeAndAlign(s).Size() == 4) { 20644 break 20645 } 20646 v.reset(OpAMD64MOVLstoreconst) 20647 v.AuxInt = 0 20648 v.AddArg(destptr) 20649 v.AddArg(mem) 20650 return true 20651 } 20652 // match: (Zero [s] destptr mem) 20653 // cond: SizeAndAlign(s).Size() == 8 20654 // result: (MOVQstoreconst [0] destptr mem) 20655 for { 20656 s := v.AuxInt 20657 destptr := v.Args[0] 20658 mem := v.Args[1] 20659 if !(SizeAndAlign(s).Size() == 8) { 20660 break 20661 } 20662 v.reset(OpAMD64MOVQstoreconst) 20663 v.AuxInt = 0 20664 v.AddArg(destptr) 20665 v.AddArg(mem) 20666 return true 20667 } 20668 // match: (Zero [s] destptr mem) 20669 // cond: SizeAndAlign(s).Size() == 3 20670 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 20671 for { 20672 s := 
v.AuxInt 20673 destptr := v.Args[0] 20674 mem := v.Args[1] 20675 if !(SizeAndAlign(s).Size() == 3) { 20676 break 20677 } 20678 v.reset(OpAMD64MOVBstoreconst) 20679 v.AuxInt = makeValAndOff(0, 2) 20680 v.AddArg(destptr) 20681 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, TypeMem) 20682 v0.AuxInt = 0 20683 v0.AddArg(destptr) 20684 v0.AddArg(mem) 20685 v.AddArg(v0) 20686 return true 20687 } 20688 // match: (Zero [s] destptr mem) 20689 // cond: SizeAndAlign(s).Size() == 5 20690 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 20691 for { 20692 s := v.AuxInt 20693 destptr := v.Args[0] 20694 mem := v.Args[1] 20695 if !(SizeAndAlign(s).Size() == 5) { 20696 break 20697 } 20698 v.reset(OpAMD64MOVBstoreconst) 20699 v.AuxInt = makeValAndOff(0, 4) 20700 v.AddArg(destptr) 20701 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 20702 v0.AuxInt = 0 20703 v0.AddArg(destptr) 20704 v0.AddArg(mem) 20705 v.AddArg(v0) 20706 return true 20707 } 20708 // match: (Zero [s] destptr mem) 20709 // cond: SizeAndAlign(s).Size() == 6 20710 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 20711 for { 20712 s := v.AuxInt 20713 destptr := v.Args[0] 20714 mem := v.Args[1] 20715 if !(SizeAndAlign(s).Size() == 6) { 20716 break 20717 } 20718 v.reset(OpAMD64MOVWstoreconst) 20719 v.AuxInt = makeValAndOff(0, 4) 20720 v.AddArg(destptr) 20721 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 20722 v0.AuxInt = 0 20723 v0.AddArg(destptr) 20724 v0.AddArg(mem) 20725 v.AddArg(v0) 20726 return true 20727 } 20728 // match: (Zero [s] destptr mem) 20729 // cond: SizeAndAlign(s).Size() == 7 20730 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 20731 for { 20732 s := v.AuxInt 20733 destptr := v.Args[0] 20734 mem := v.Args[1] 20735 if !(SizeAndAlign(s).Size() == 7) { 20736 break 20737 } 20738 v.reset(OpAMD64MOVLstoreconst) 20739 v.AuxInt = makeValAndOff(0, 3) 20740 v.AddArg(destptr) 20741 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 20742 v0.AuxInt = 0 20743 v0.AddArg(destptr) 20744 v0.AddArg(mem) 20745 v.AddArg(v0) 20746 return true 20747 } 20748 // match: (Zero [s] destptr mem) 20749 // cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 20750 // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem)) 20751 for { 20752 s := v.AuxInt 20753 destptr := v.Args[0] 20754 mem := v.Args[1] 20755 if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) { 20756 break 20757 } 20758 v.reset(OpZero) 20759 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8 20760 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 20761 v0.AuxInt = SizeAndAlign(s).Size() % 8 20762 v0.AddArg(destptr) 20763 v.AddArg(v0) 20764 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 20765 v1.AuxInt = 0 20766 v1.AddArg(destptr) 20767 v1.AddArg(mem) 20768 v.AddArg(v1) 20769 return true 20770 } 20771 // match: (Zero [s] destptr mem) 20772 // cond: SizeAndAlign(s).Size() == 16 20773 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 20774 for { 20775 s := v.AuxInt 20776 destptr := v.Args[0] 20777 mem := v.Args[1] 20778 if !(SizeAndAlign(s).Size() == 16) { 20779 break 20780 } 20781 v.reset(OpAMD64MOVQstoreconst) 20782 v.AuxInt = makeValAndOff(0, 8) 20783 v.AddArg(destptr) 20784 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 20785 v0.AuxInt = 0 20786 
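// The Zero rules lower small fixed-size zeroing into chains of *storeconst
// ops. Each storeconst packs both the constant being stored (always 0 here)
// and a byte offset into its AuxInt via makeValAndOff, so a 16-byte Zero
// becomes two 8-byte zero stores at offsets 8 and 0, and awkward sizes such
// as 7 are covered by two overlapping 4-byte stores at offsets 3 and 0.
// Larger sizes are handled by the rules further below, which fall back to
// DUFFZERO or REP STOSQ depending on size, alignment and config.noDuffDevice.
// A sketch of the memory effect for the 16-byte case, with a hypothetical
// helper standing in for MOVQstoreconst:
func sketchZero16(dst []byte) {
	zero8 := func(off int) { // models MOVQstoreconst [makeValAndOff(0,off)] destptr
		for i := 0; i < 8; i++ {
			dst[off+i] = 0
		}
	}
	zero8(0) // inner store, threaded through the incoming memory state
	zero8(8) // outer store, chained after it
}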
v0.AddArg(destptr) 20787 v0.AddArg(mem) 20788 v.AddArg(v0) 20789 return true 20790 } 20791 // match: (Zero [s] destptr mem) 20792 // cond: SizeAndAlign(s).Size() == 24 20793 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 20794 for { 20795 s := v.AuxInt 20796 destptr := v.Args[0] 20797 mem := v.Args[1] 20798 if !(SizeAndAlign(s).Size() == 24) { 20799 break 20800 } 20801 v.reset(OpAMD64MOVQstoreconst) 20802 v.AuxInt = makeValAndOff(0, 16) 20803 v.AddArg(destptr) 20804 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 20805 v0.AuxInt = makeValAndOff(0, 8) 20806 v0.AddArg(destptr) 20807 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 20808 v1.AuxInt = 0 20809 v1.AddArg(destptr) 20810 v1.AddArg(mem) 20811 v0.AddArg(v1) 20812 v.AddArg(v0) 20813 return true 20814 } 20815 // match: (Zero [s] destptr mem) 20816 // cond: SizeAndAlign(s).Size() == 32 20817 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 20818 for { 20819 s := v.AuxInt 20820 destptr := v.Args[0] 20821 mem := v.Args[1] 20822 if !(SizeAndAlign(s).Size() == 32) { 20823 break 20824 } 20825 v.reset(OpAMD64MOVQstoreconst) 20826 v.AuxInt = makeValAndOff(0, 24) 20827 v.AddArg(destptr) 20828 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 20829 v0.AuxInt = makeValAndOff(0, 16) 20830 v0.AddArg(destptr) 20831 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 20832 v1.AuxInt = makeValAndOff(0, 8) 20833 v1.AddArg(destptr) 20834 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 20835 v2.AuxInt = 0 20836 v2.AddArg(destptr) 20837 v2.AddArg(mem) 20838 v1.AddArg(v2) 20839 v0.AddArg(v1) 20840 v.AddArg(v0) 20841 return true 20842 } 20843 // match: (Zero [s] destptr mem) 20844 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice 20845 // result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) 20846 for { 20847 s := v.AuxInt 20848 destptr := v.Args[0] 20849 mem := v.Args[1] 20850 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) { 20851 break 20852 } 20853 v.reset(OpZero) 20854 v.AuxInt = SizeAndAlign(s).Size() - 8 20855 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 20856 v0.AuxInt = 8 20857 v0.AddArg(destptr) 20858 v.AddArg(v0) 20859 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem) 20860 v1.AddArg(destptr) 20861 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 20862 v2.AuxInt = 0 20863 v1.AddArg(v2) 20864 v1.AddArg(mem) 20865 v.AddArg(v1) 20866 return true 20867 } 20868 // match: (Zero [s] destptr mem) 20869 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 20870 // result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem) 20871 for { 20872 s := v.AuxInt 20873 destptr := v.Args[0] 20874 mem := v.Args[1] 20875 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 20876 break 20877 } 20878 v.reset(OpAMD64DUFFZERO) 20879 v.AuxInt = SizeAndAlign(s).Size() 20880 v.AddArg(destptr) 20881 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, TypeInt128) 20882 v0.AuxInt = 0 20883 v.AddArg(v0) 20884 v.AddArg(mem) 20885 return true 20886 } 20887 // match: (Zero [s] destptr 
mem) 20888 // cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0 20889 // result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem) 20890 for { 20891 s := v.AuxInt 20892 destptr := v.Args[0] 20893 mem := v.Args[1] 20894 if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) { 20895 break 20896 } 20897 v.reset(OpAMD64REPSTOSQ) 20898 v.AddArg(destptr) 20899 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 20900 v0.AuxInt = SizeAndAlign(s).Size() / 8 20901 v.AddArg(v0) 20902 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 20903 v1.AuxInt = 0 20904 v.AddArg(v1) 20905 v.AddArg(mem) 20906 return true 20907 } 20908 return false 20909 } 20910 func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool { 20911 b := v.Block 20912 _ = b 20913 // match: (ZeroExt16to32 x) 20914 // cond: 20915 // result: (MOVWQZX x) 20916 for { 20917 x := v.Args[0] 20918 v.reset(OpAMD64MOVWQZX) 20919 v.AddArg(x) 20920 return true 20921 } 20922 } 20923 func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool { 20924 b := v.Block 20925 _ = b 20926 // match: (ZeroExt16to64 x) 20927 // cond: 20928 // result: (MOVWQZX x) 20929 for { 20930 x := v.Args[0] 20931 v.reset(OpAMD64MOVWQZX) 20932 v.AddArg(x) 20933 return true 20934 } 20935 } 20936 func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool { 20937 b := v.Block 20938 _ = b 20939 // match: (ZeroExt32to64 x) 20940 // cond: 20941 // result: (MOVLQZX x) 20942 for { 20943 x := v.Args[0] 20944 v.reset(OpAMD64MOVLQZX) 20945 v.AddArg(x) 20946 return true 20947 } 20948 } 20949 func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool { 20950 b := v.Block 20951 _ = b 20952 // match: (ZeroExt8to16 x) 20953 // cond: 20954 // result: (MOVBQZX x) 20955 for { 20956 x := v.Args[0] 20957 v.reset(OpAMD64MOVBQZX) 20958 v.AddArg(x) 20959 return true 20960 } 20961 } 20962 func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool { 20963 b := v.Block 20964 _ = b 20965 // match: (ZeroExt8to32 x) 20966 // cond: 20967 // result: (MOVBQZX x) 20968 for { 20969 x := v.Args[0] 20970 v.reset(OpAMD64MOVBQZX) 20971 v.AddArg(x) 20972 return true 20973 } 20974 } 20975 func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool { 20976 b := v.Block 20977 _ = b 20978 // match: (ZeroExt8to64 x) 20979 // cond: 20980 // result: (MOVBQZX x) 20981 for { 20982 x := v.Args[0] 20983 v.reset(OpAMD64MOVBQZX) 20984 v.AddArg(x) 20985 return true 20986 } 20987 } 20988 func rewriteBlockAMD64(b *Block, config *Config) bool { 20989 switch b.Kind { 20990 case BlockAMD64EQ: 20991 // match: (EQ (InvertFlags cmp) yes no) 20992 // cond: 20993 // result: (EQ cmp yes no) 20994 for { 20995 v := b.Control 20996 if v.Op != OpAMD64InvertFlags { 20997 break 20998 } 20999 cmp := v.Args[0] 21000 yes := b.Succs[0] 21001 no := b.Succs[1] 21002 b.Kind = BlockAMD64EQ 21003 b.SetControl(cmp) 21004 _ = yes 21005 _ = no 21006 return true 21007 } 21008 // match: (EQ (FlagEQ) yes no) 21009 // cond: 21010 // result: (First nil yes no) 21011 for { 21012 v := b.Control 21013 if v.Op != OpAMD64FlagEQ { 21014 break 21015 } 21016 yes := b.Succs[0] 21017 no := b.Succs[1] 21018 b.Kind = BlockFirst 21019 b.SetControl(nil) 21020 _ = yes 21021 _ = no 21022 return true 21023 } 21024 // match: (EQ (FlagLT_ULT) yes no) 21025 // cond: 21026 // result: (First nil no yes) 21027 for { 
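// rewriteBlockAMD64 applies the same kind of pattern matching to control-flow
// blocks. The first group of cases folds branches whose outcome is already
// known: earlier constant folding can reduce a CMP to one of the Flag*
// pseudo-values (FlagEQ, FlagLT_ULT, FlagGT_UGT, ...), and a conditional
// block controlled by such a constant is turned into an unconditional
// BlockFirst. When the recorded outcome means the branch would not be taken,
// b.swapSuccessors() flips the yes/no edges first, since BlockFirst always
// continues to Succs[0]. The InvertFlags cases handle comparisons whose
// operands were swapped: EQ is symmetric and keeps its kind, while ordered
// blocks flip direction (GE becomes LE, GT becomes LT, and so on).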
21028 v := b.Control 21029 if v.Op != OpAMD64FlagLT_ULT { 21030 break 21031 } 21032 yes := b.Succs[0] 21033 no := b.Succs[1] 21034 b.Kind = BlockFirst 21035 b.SetControl(nil) 21036 b.swapSuccessors() 21037 _ = no 21038 _ = yes 21039 return true 21040 } 21041 // match: (EQ (FlagLT_UGT) yes no) 21042 // cond: 21043 // result: (First nil no yes) 21044 for { 21045 v := b.Control 21046 if v.Op != OpAMD64FlagLT_UGT { 21047 break 21048 } 21049 yes := b.Succs[0] 21050 no := b.Succs[1] 21051 b.Kind = BlockFirst 21052 b.SetControl(nil) 21053 b.swapSuccessors() 21054 _ = no 21055 _ = yes 21056 return true 21057 } 21058 // match: (EQ (FlagGT_ULT) yes no) 21059 // cond: 21060 // result: (First nil no yes) 21061 for { 21062 v := b.Control 21063 if v.Op != OpAMD64FlagGT_ULT { 21064 break 21065 } 21066 yes := b.Succs[0] 21067 no := b.Succs[1] 21068 b.Kind = BlockFirst 21069 b.SetControl(nil) 21070 b.swapSuccessors() 21071 _ = no 21072 _ = yes 21073 return true 21074 } 21075 // match: (EQ (FlagGT_UGT) yes no) 21076 // cond: 21077 // result: (First nil no yes) 21078 for { 21079 v := b.Control 21080 if v.Op != OpAMD64FlagGT_UGT { 21081 break 21082 } 21083 yes := b.Succs[0] 21084 no := b.Succs[1] 21085 b.Kind = BlockFirst 21086 b.SetControl(nil) 21087 b.swapSuccessors() 21088 _ = no 21089 _ = yes 21090 return true 21091 } 21092 case BlockAMD64GE: 21093 // match: (GE (InvertFlags cmp) yes no) 21094 // cond: 21095 // result: (LE cmp yes no) 21096 for { 21097 v := b.Control 21098 if v.Op != OpAMD64InvertFlags { 21099 break 21100 } 21101 cmp := v.Args[0] 21102 yes := b.Succs[0] 21103 no := b.Succs[1] 21104 b.Kind = BlockAMD64LE 21105 b.SetControl(cmp) 21106 _ = yes 21107 _ = no 21108 return true 21109 } 21110 // match: (GE (FlagEQ) yes no) 21111 // cond: 21112 // result: (First nil yes no) 21113 for { 21114 v := b.Control 21115 if v.Op != OpAMD64FlagEQ { 21116 break 21117 } 21118 yes := b.Succs[0] 21119 no := b.Succs[1] 21120 b.Kind = BlockFirst 21121 b.SetControl(nil) 21122 _ = yes 21123 _ = no 21124 return true 21125 } 21126 // match: (GE (FlagLT_ULT) yes no) 21127 // cond: 21128 // result: (First nil no yes) 21129 for { 21130 v := b.Control 21131 if v.Op != OpAMD64FlagLT_ULT { 21132 break 21133 } 21134 yes := b.Succs[0] 21135 no := b.Succs[1] 21136 b.Kind = BlockFirst 21137 b.SetControl(nil) 21138 b.swapSuccessors() 21139 _ = no 21140 _ = yes 21141 return true 21142 } 21143 // match: (GE (FlagLT_UGT) yes no) 21144 // cond: 21145 // result: (First nil no yes) 21146 for { 21147 v := b.Control 21148 if v.Op != OpAMD64FlagLT_UGT { 21149 break 21150 } 21151 yes := b.Succs[0] 21152 no := b.Succs[1] 21153 b.Kind = BlockFirst 21154 b.SetControl(nil) 21155 b.swapSuccessors() 21156 _ = no 21157 _ = yes 21158 return true 21159 } 21160 // match: (GE (FlagGT_ULT) yes no) 21161 // cond: 21162 // result: (First nil yes no) 21163 for { 21164 v := b.Control 21165 if v.Op != OpAMD64FlagGT_ULT { 21166 break 21167 } 21168 yes := b.Succs[0] 21169 no := b.Succs[1] 21170 b.Kind = BlockFirst 21171 b.SetControl(nil) 21172 _ = yes 21173 _ = no 21174 return true 21175 } 21176 // match: (GE (FlagGT_UGT) yes no) 21177 // cond: 21178 // result: (First nil yes no) 21179 for { 21180 v := b.Control 21181 if v.Op != OpAMD64FlagGT_UGT { 21182 break 21183 } 21184 yes := b.Succs[0] 21185 no := b.Succs[1] 21186 b.Kind = BlockFirst 21187 b.SetControl(nil) 21188 _ = yes 21189 _ = no 21190 return true 21191 } 21192 case BlockAMD64GT: 21193 // match: (GT (InvertFlags cmp) yes no) 21194 // cond: 21195 // result: (LT cmp yes no) 21196 for { 21197 v := 
b.Control 21198 if v.Op != OpAMD64InvertFlags { 21199 break 21200 } 21201 cmp := v.Args[0] 21202 yes := b.Succs[0] 21203 no := b.Succs[1] 21204 b.Kind = BlockAMD64LT 21205 b.SetControl(cmp) 21206 _ = yes 21207 _ = no 21208 return true 21209 } 21210 // match: (GT (FlagEQ) yes no) 21211 // cond: 21212 // result: (First nil no yes) 21213 for { 21214 v := b.Control 21215 if v.Op != OpAMD64FlagEQ { 21216 break 21217 } 21218 yes := b.Succs[0] 21219 no := b.Succs[1] 21220 b.Kind = BlockFirst 21221 b.SetControl(nil) 21222 b.swapSuccessors() 21223 _ = no 21224 _ = yes 21225 return true 21226 } 21227 // match: (GT (FlagLT_ULT) yes no) 21228 // cond: 21229 // result: (First nil no yes) 21230 for { 21231 v := b.Control 21232 if v.Op != OpAMD64FlagLT_ULT { 21233 break 21234 } 21235 yes := b.Succs[0] 21236 no := b.Succs[1] 21237 b.Kind = BlockFirst 21238 b.SetControl(nil) 21239 b.swapSuccessors() 21240 _ = no 21241 _ = yes 21242 return true 21243 } 21244 // match: (GT (FlagLT_UGT) yes no) 21245 // cond: 21246 // result: (First nil no yes) 21247 for { 21248 v := b.Control 21249 if v.Op != OpAMD64FlagLT_UGT { 21250 break 21251 } 21252 yes := b.Succs[0] 21253 no := b.Succs[1] 21254 b.Kind = BlockFirst 21255 b.SetControl(nil) 21256 b.swapSuccessors() 21257 _ = no 21258 _ = yes 21259 return true 21260 } 21261 // match: (GT (FlagGT_ULT) yes no) 21262 // cond: 21263 // result: (First nil yes no) 21264 for { 21265 v := b.Control 21266 if v.Op != OpAMD64FlagGT_ULT { 21267 break 21268 } 21269 yes := b.Succs[0] 21270 no := b.Succs[1] 21271 b.Kind = BlockFirst 21272 b.SetControl(nil) 21273 _ = yes 21274 _ = no 21275 return true 21276 } 21277 // match: (GT (FlagGT_UGT) yes no) 21278 // cond: 21279 // result: (First nil yes no) 21280 for { 21281 v := b.Control 21282 if v.Op != OpAMD64FlagGT_UGT { 21283 break 21284 } 21285 yes := b.Succs[0] 21286 no := b.Succs[1] 21287 b.Kind = BlockFirst 21288 b.SetControl(nil) 21289 _ = yes 21290 _ = no 21291 return true 21292 } 21293 case BlockIf: 21294 // match: (If (SETL cmp) yes no) 21295 // cond: 21296 // result: (LT cmp yes no) 21297 for { 21298 v := b.Control 21299 if v.Op != OpAMD64SETL { 21300 break 21301 } 21302 cmp := v.Args[0] 21303 yes := b.Succs[0] 21304 no := b.Succs[1] 21305 b.Kind = BlockAMD64LT 21306 b.SetControl(cmp) 21307 _ = yes 21308 _ = no 21309 return true 21310 } 21311 // match: (If (SETLE cmp) yes no) 21312 // cond: 21313 // result: (LE cmp yes no) 21314 for { 21315 v := b.Control 21316 if v.Op != OpAMD64SETLE { 21317 break 21318 } 21319 cmp := v.Args[0] 21320 yes := b.Succs[0] 21321 no := b.Succs[1] 21322 b.Kind = BlockAMD64LE 21323 b.SetControl(cmp) 21324 _ = yes 21325 _ = no 21326 return true 21327 } 21328 // match: (If (SETG cmp) yes no) 21329 // cond: 21330 // result: (GT cmp yes no) 21331 for { 21332 v := b.Control 21333 if v.Op != OpAMD64SETG { 21334 break 21335 } 21336 cmp := v.Args[0] 21337 yes := b.Succs[0] 21338 no := b.Succs[1] 21339 b.Kind = BlockAMD64GT 21340 b.SetControl(cmp) 21341 _ = yes 21342 _ = no 21343 return true 21344 } 21345 // match: (If (SETGE cmp) yes no) 21346 // cond: 21347 // result: (GE cmp yes no) 21348 for { 21349 v := b.Control 21350 if v.Op != OpAMD64SETGE { 21351 break 21352 } 21353 cmp := v.Args[0] 21354 yes := b.Succs[0] 21355 no := b.Succs[1] 21356 b.Kind = BlockAMD64GE 21357 b.SetControl(cmp) 21358 _ = yes 21359 _ = no 21360 return true 21361 } 21362 // match: (If (SETEQ cmp) yes no) 21363 // cond: 21364 // result: (EQ cmp yes no) 21365 for { 21366 v := b.Control 21367 if v.Op != OpAMD64SETEQ { 21368 break 21369 } 
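// The BlockIf cases translate a generic boolean branch into a flags-based
// block whenever the condition is visibly a SETcc: (If (SETL cmp)) becomes
// (LT cmp), (If (SETB cmp)) becomes (ULT cmp), and so on for the signed,
// unsigned and floating-point variants. The SETcc value itself is no longer
// referenced by the block afterwards, so if it has no other uses it is
// removed by the later dead-code pass.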
21370 cmp := v.Args[0] 21371 yes := b.Succs[0] 21372 no := b.Succs[1] 21373 b.Kind = BlockAMD64EQ 21374 b.SetControl(cmp) 21375 _ = yes 21376 _ = no 21377 return true 21378 } 21379 // match: (If (SETNE cmp) yes no) 21380 // cond: 21381 // result: (NE cmp yes no) 21382 for { 21383 v := b.Control 21384 if v.Op != OpAMD64SETNE { 21385 break 21386 } 21387 cmp := v.Args[0] 21388 yes := b.Succs[0] 21389 no := b.Succs[1] 21390 b.Kind = BlockAMD64NE 21391 b.SetControl(cmp) 21392 _ = yes 21393 _ = no 21394 return true 21395 } 21396 // match: (If (SETB cmp) yes no) 21397 // cond: 21398 // result: (ULT cmp yes no) 21399 for { 21400 v := b.Control 21401 if v.Op != OpAMD64SETB { 21402 break 21403 } 21404 cmp := v.Args[0] 21405 yes := b.Succs[0] 21406 no := b.Succs[1] 21407 b.Kind = BlockAMD64ULT 21408 b.SetControl(cmp) 21409 _ = yes 21410 _ = no 21411 return true 21412 } 21413 // match: (If (SETBE cmp) yes no) 21414 // cond: 21415 // result: (ULE cmp yes no) 21416 for { 21417 v := b.Control 21418 if v.Op != OpAMD64SETBE { 21419 break 21420 } 21421 cmp := v.Args[0] 21422 yes := b.Succs[0] 21423 no := b.Succs[1] 21424 b.Kind = BlockAMD64ULE 21425 b.SetControl(cmp) 21426 _ = yes 21427 _ = no 21428 return true 21429 } 21430 // match: (If (SETA cmp) yes no) 21431 // cond: 21432 // result: (UGT cmp yes no) 21433 for { 21434 v := b.Control 21435 if v.Op != OpAMD64SETA { 21436 break 21437 } 21438 cmp := v.Args[0] 21439 yes := b.Succs[0] 21440 no := b.Succs[1] 21441 b.Kind = BlockAMD64UGT 21442 b.SetControl(cmp) 21443 _ = yes 21444 _ = no 21445 return true 21446 } 21447 // match: (If (SETAE cmp) yes no) 21448 // cond: 21449 // result: (UGE cmp yes no) 21450 for { 21451 v := b.Control 21452 if v.Op != OpAMD64SETAE { 21453 break 21454 } 21455 cmp := v.Args[0] 21456 yes := b.Succs[0] 21457 no := b.Succs[1] 21458 b.Kind = BlockAMD64UGE 21459 b.SetControl(cmp) 21460 _ = yes 21461 _ = no 21462 return true 21463 } 21464 // match: (If (SETGF cmp) yes no) 21465 // cond: 21466 // result: (UGT cmp yes no) 21467 for { 21468 v := b.Control 21469 if v.Op != OpAMD64SETGF { 21470 break 21471 } 21472 cmp := v.Args[0] 21473 yes := b.Succs[0] 21474 no := b.Succs[1] 21475 b.Kind = BlockAMD64UGT 21476 b.SetControl(cmp) 21477 _ = yes 21478 _ = no 21479 return true 21480 } 21481 // match: (If (SETGEF cmp) yes no) 21482 // cond: 21483 // result: (UGE cmp yes no) 21484 for { 21485 v := b.Control 21486 if v.Op != OpAMD64SETGEF { 21487 break 21488 } 21489 cmp := v.Args[0] 21490 yes := b.Succs[0] 21491 no := b.Succs[1] 21492 b.Kind = BlockAMD64UGE 21493 b.SetControl(cmp) 21494 _ = yes 21495 _ = no 21496 return true 21497 } 21498 // match: (If (SETEQF cmp) yes no) 21499 // cond: 21500 // result: (EQF cmp yes no) 21501 for { 21502 v := b.Control 21503 if v.Op != OpAMD64SETEQF { 21504 break 21505 } 21506 cmp := v.Args[0] 21507 yes := b.Succs[0] 21508 no := b.Succs[1] 21509 b.Kind = BlockAMD64EQF 21510 b.SetControl(cmp) 21511 _ = yes 21512 _ = no 21513 return true 21514 } 21515 // match: (If (SETNEF cmp) yes no) 21516 // cond: 21517 // result: (NEF cmp yes no) 21518 for { 21519 v := b.Control 21520 if v.Op != OpAMD64SETNEF { 21521 break 21522 } 21523 cmp := v.Args[0] 21524 yes := b.Succs[0] 21525 no := b.Succs[1] 21526 b.Kind = BlockAMD64NEF 21527 b.SetControl(cmp) 21528 _ = yes 21529 _ = no 21530 return true 21531 } 21532 // match: (If cond yes no) 21533 // cond: 21534 // result: (NE (TESTB cond cond) yes no) 21535 for { 21536 v := b.Control 21537 _ = v 21538 cond := b.Control 21539 yes := b.Succs[0] 21540 no := b.Succs[1] 21541 b.Kind = 
BlockAMD64NE 21542 v0 := b.NewValue0(v.Pos, OpAMD64TESTB, TypeFlags) 21543 v0.AddArg(cond) 21544 v0.AddArg(cond) 21545 b.SetControl(v0) 21546 _ = yes 21547 _ = no 21548 return true 21549 } 21550 case BlockAMD64LE: 21551 // match: (LE (InvertFlags cmp) yes no) 21552 // cond: 21553 // result: (GE cmp yes no) 21554 for { 21555 v := b.Control 21556 if v.Op != OpAMD64InvertFlags { 21557 break 21558 } 21559 cmp := v.Args[0] 21560 yes := b.Succs[0] 21561 no := b.Succs[1] 21562 b.Kind = BlockAMD64GE 21563 b.SetControl(cmp) 21564 _ = yes 21565 _ = no 21566 return true 21567 } 21568 // match: (LE (FlagEQ) yes no) 21569 // cond: 21570 // result: (First nil yes no) 21571 for { 21572 v := b.Control 21573 if v.Op != OpAMD64FlagEQ { 21574 break 21575 } 21576 yes := b.Succs[0] 21577 no := b.Succs[1] 21578 b.Kind = BlockFirst 21579 b.SetControl(nil) 21580 _ = yes 21581 _ = no 21582 return true 21583 } 21584 // match: (LE (FlagLT_ULT) yes no) 21585 // cond: 21586 // result: (First nil yes no) 21587 for { 21588 v := b.Control 21589 if v.Op != OpAMD64FlagLT_ULT { 21590 break 21591 } 21592 yes := b.Succs[0] 21593 no := b.Succs[1] 21594 b.Kind = BlockFirst 21595 b.SetControl(nil) 21596 _ = yes 21597 _ = no 21598 return true 21599 } 21600 // match: (LE (FlagLT_UGT) yes no) 21601 // cond: 21602 // result: (First nil yes no) 21603 for { 21604 v := b.Control 21605 if v.Op != OpAMD64FlagLT_UGT { 21606 break 21607 } 21608 yes := b.Succs[0] 21609 no := b.Succs[1] 21610 b.Kind = BlockFirst 21611 b.SetControl(nil) 21612 _ = yes 21613 _ = no 21614 return true 21615 } 21616 // match: (LE (FlagGT_ULT) yes no) 21617 // cond: 21618 // result: (First nil no yes) 21619 for { 21620 v := b.Control 21621 if v.Op != OpAMD64FlagGT_ULT { 21622 break 21623 } 21624 yes := b.Succs[0] 21625 no := b.Succs[1] 21626 b.Kind = BlockFirst 21627 b.SetControl(nil) 21628 b.swapSuccessors() 21629 _ = no 21630 _ = yes 21631 return true 21632 } 21633 // match: (LE (FlagGT_UGT) yes no) 21634 // cond: 21635 // result: (First nil no yes) 21636 for { 21637 v := b.Control 21638 if v.Op != OpAMD64FlagGT_UGT { 21639 break 21640 } 21641 yes := b.Succs[0] 21642 no := b.Succs[1] 21643 b.Kind = BlockFirst 21644 b.SetControl(nil) 21645 b.swapSuccessors() 21646 _ = no 21647 _ = yes 21648 return true 21649 } 21650 case BlockAMD64LT: 21651 // match: (LT (InvertFlags cmp) yes no) 21652 // cond: 21653 // result: (GT cmp yes no) 21654 for { 21655 v := b.Control 21656 if v.Op != OpAMD64InvertFlags { 21657 break 21658 } 21659 cmp := v.Args[0] 21660 yes := b.Succs[0] 21661 no := b.Succs[1] 21662 b.Kind = BlockAMD64GT 21663 b.SetControl(cmp) 21664 _ = yes 21665 _ = no 21666 return true 21667 } 21668 // match: (LT (FlagEQ) yes no) 21669 // cond: 21670 // result: (First nil no yes) 21671 for { 21672 v := b.Control 21673 if v.Op != OpAMD64FlagEQ { 21674 break 21675 } 21676 yes := b.Succs[0] 21677 no := b.Succs[1] 21678 b.Kind = BlockFirst 21679 b.SetControl(nil) 21680 b.swapSuccessors() 21681 _ = no 21682 _ = yes 21683 return true 21684 } 21685 // match: (LT (FlagLT_ULT) yes no) 21686 // cond: 21687 // result: (First nil yes no) 21688 for { 21689 v := b.Control 21690 if v.Op != OpAMD64FlagLT_ULT { 21691 break 21692 } 21693 yes := b.Succs[0] 21694 no := b.Succs[1] 21695 b.Kind = BlockFirst 21696 b.SetControl(nil) 21697 _ = yes 21698 _ = no 21699 return true 21700 } 21701 // match: (LT (FlagLT_UGT) yes no) 21702 // cond: 21703 // result: (First nil yes no) 21704 for { 21705 v := b.Control 21706 if v.Op != OpAMD64FlagLT_UGT { 21707 break 21708 } 21709 yes := b.Succs[0] 21710 
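// The floating-point setters just above map onto the unsigned-style blocks
// (SETGF to UGT, SETGEF to UGE), because UCOMISS/UCOMISD report their result
// in CF/ZF the way unsigned integer compares do, while equality goes through
// the dedicated EQF/NEF blocks so the unordered (NaN) case can be handled.
// The final BlockIf case is the fallback for an arbitrary boolean control:
// (If cond) becomes (NE (TESTB cond cond)), i.e. the byte is tested against
// itself and the branch is taken when it is non-zero. A sketch of that
// fallback's behaviour in plain Go, with a hypothetical helper name:
func sketchBoolBranch(cond bool) string {
	v := byte(0)
	if cond {
		v = 1 // the boolean is already materialized as a 0/1 byte
	}
	if v&v != 0 { // TESTB v v: ZF is set exactly when the byte is zero
		return "yes" // NE block: taken when ZF is clear
	}
	return "no"
}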
no := b.Succs[1] 21711 b.Kind = BlockFirst 21712 b.SetControl(nil) 21713 _ = yes 21714 _ = no 21715 return true 21716 } 21717 // match: (LT (FlagGT_ULT) yes no) 21718 // cond: 21719 // result: (First nil no yes) 21720 for { 21721 v := b.Control 21722 if v.Op != OpAMD64FlagGT_ULT { 21723 break 21724 } 21725 yes := b.Succs[0] 21726 no := b.Succs[1] 21727 b.Kind = BlockFirst 21728 b.SetControl(nil) 21729 b.swapSuccessors() 21730 _ = no 21731 _ = yes 21732 return true 21733 } 21734 // match: (LT (FlagGT_UGT) yes no) 21735 // cond: 21736 // result: (First nil no yes) 21737 for { 21738 v := b.Control 21739 if v.Op != OpAMD64FlagGT_UGT { 21740 break 21741 } 21742 yes := b.Succs[0] 21743 no := b.Succs[1] 21744 b.Kind = BlockFirst 21745 b.SetControl(nil) 21746 b.swapSuccessors() 21747 _ = no 21748 _ = yes 21749 return true 21750 } 21751 case BlockAMD64NE: 21752 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 21753 // cond: 21754 // result: (LT cmp yes no) 21755 for { 21756 v := b.Control 21757 if v.Op != OpAMD64TESTB { 21758 break 21759 } 21760 v_0 := v.Args[0] 21761 if v_0.Op != OpAMD64SETL { 21762 break 21763 } 21764 cmp := v_0.Args[0] 21765 v_1 := v.Args[1] 21766 if v_1.Op != OpAMD64SETL { 21767 break 21768 } 21769 if cmp != v_1.Args[0] { 21770 break 21771 } 21772 yes := b.Succs[0] 21773 no := b.Succs[1] 21774 b.Kind = BlockAMD64LT 21775 b.SetControl(cmp) 21776 _ = yes 21777 _ = no 21778 return true 21779 } 21780 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 21781 // cond: 21782 // result: (LE cmp yes no) 21783 for { 21784 v := b.Control 21785 if v.Op != OpAMD64TESTB { 21786 break 21787 } 21788 v_0 := v.Args[0] 21789 if v_0.Op != OpAMD64SETLE { 21790 break 21791 } 21792 cmp := v_0.Args[0] 21793 v_1 := v.Args[1] 21794 if v_1.Op != OpAMD64SETLE { 21795 break 21796 } 21797 if cmp != v_1.Args[0] { 21798 break 21799 } 21800 yes := b.Succs[0] 21801 no := b.Succs[1] 21802 b.Kind = BlockAMD64LE 21803 b.SetControl(cmp) 21804 _ = yes 21805 _ = no 21806 return true 21807 } 21808 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 21809 // cond: 21810 // result: (GT cmp yes no) 21811 for { 21812 v := b.Control 21813 if v.Op != OpAMD64TESTB { 21814 break 21815 } 21816 v_0 := v.Args[0] 21817 if v_0.Op != OpAMD64SETG { 21818 break 21819 } 21820 cmp := v_0.Args[0] 21821 v_1 := v.Args[1] 21822 if v_1.Op != OpAMD64SETG { 21823 break 21824 } 21825 if cmp != v_1.Args[0] { 21826 break 21827 } 21828 yes := b.Succs[0] 21829 no := b.Succs[1] 21830 b.Kind = BlockAMD64GT 21831 b.SetControl(cmp) 21832 _ = yes 21833 _ = no 21834 return true 21835 } 21836 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 21837 // cond: 21838 // result: (GE cmp yes no) 21839 for { 21840 v := b.Control 21841 if v.Op != OpAMD64TESTB { 21842 break 21843 } 21844 v_0 := v.Args[0] 21845 if v_0.Op != OpAMD64SETGE { 21846 break 21847 } 21848 cmp := v_0.Args[0] 21849 v_1 := v.Args[1] 21850 if v_1.Op != OpAMD64SETGE { 21851 break 21852 } 21853 if cmp != v_1.Args[0] { 21854 break 21855 } 21856 yes := b.Succs[0] 21857 no := b.Succs[1] 21858 b.Kind = BlockAMD64GE 21859 b.SetControl(cmp) 21860 _ = yes 21861 _ = no 21862 return true 21863 } 21864 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 21865 // cond: 21866 // result: (EQ cmp yes no) 21867 for { 21868 v := b.Control 21869 if v.Op != OpAMD64TESTB { 21870 break 21871 } 21872 v_0 := v.Args[0] 21873 if v_0.Op != OpAMD64SETEQ { 21874 break 21875 } 21876 cmp := v_0.Args[0] 21877 v_1 := v.Args[1] 21878 if v_1.Op != OpAMD64SETEQ { 21879 break 21880 } 21881 if cmp != v_1.Args[0] { 21882 
break 21883 } 21884 yes := b.Succs[0] 21885 no := b.Succs[1] 21886 b.Kind = BlockAMD64EQ 21887 b.SetControl(cmp) 21888 _ = yes 21889 _ = no 21890 return true 21891 } 21892 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 21893 // cond: 21894 // result: (NE cmp yes no) 21895 for { 21896 v := b.Control 21897 if v.Op != OpAMD64TESTB { 21898 break 21899 } 21900 v_0 := v.Args[0] 21901 if v_0.Op != OpAMD64SETNE { 21902 break 21903 } 21904 cmp := v_0.Args[0] 21905 v_1 := v.Args[1] 21906 if v_1.Op != OpAMD64SETNE { 21907 break 21908 } 21909 if cmp != v_1.Args[0] { 21910 break 21911 } 21912 yes := b.Succs[0] 21913 no := b.Succs[1] 21914 b.Kind = BlockAMD64NE 21915 b.SetControl(cmp) 21916 _ = yes 21917 _ = no 21918 return true 21919 } 21920 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 21921 // cond: 21922 // result: (ULT cmp yes no) 21923 for { 21924 v := b.Control 21925 if v.Op != OpAMD64TESTB { 21926 break 21927 } 21928 v_0 := v.Args[0] 21929 if v_0.Op != OpAMD64SETB { 21930 break 21931 } 21932 cmp := v_0.Args[0] 21933 v_1 := v.Args[1] 21934 if v_1.Op != OpAMD64SETB { 21935 break 21936 } 21937 if cmp != v_1.Args[0] { 21938 break 21939 } 21940 yes := b.Succs[0] 21941 no := b.Succs[1] 21942 b.Kind = BlockAMD64ULT 21943 b.SetControl(cmp) 21944 _ = yes 21945 _ = no 21946 return true 21947 } 21948 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 21949 // cond: 21950 // result: (ULE cmp yes no) 21951 for { 21952 v := b.Control 21953 if v.Op != OpAMD64TESTB { 21954 break 21955 } 21956 v_0 := v.Args[0] 21957 if v_0.Op != OpAMD64SETBE { 21958 break 21959 } 21960 cmp := v_0.Args[0] 21961 v_1 := v.Args[1] 21962 if v_1.Op != OpAMD64SETBE { 21963 break 21964 } 21965 if cmp != v_1.Args[0] { 21966 break 21967 } 21968 yes := b.Succs[0] 21969 no := b.Succs[1] 21970 b.Kind = BlockAMD64ULE 21971 b.SetControl(cmp) 21972 _ = yes 21973 _ = no 21974 return true 21975 } 21976 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 21977 // cond: 21978 // result: (UGT cmp yes no) 21979 for { 21980 v := b.Control 21981 if v.Op != OpAMD64TESTB { 21982 break 21983 } 21984 v_0 := v.Args[0] 21985 if v_0.Op != OpAMD64SETA { 21986 break 21987 } 21988 cmp := v_0.Args[0] 21989 v_1 := v.Args[1] 21990 if v_1.Op != OpAMD64SETA { 21991 break 21992 } 21993 if cmp != v_1.Args[0] { 21994 break 21995 } 21996 yes := b.Succs[0] 21997 no := b.Succs[1] 21998 b.Kind = BlockAMD64UGT 21999 b.SetControl(cmp) 22000 _ = yes 22001 _ = no 22002 return true 22003 } 22004 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 22005 // cond: 22006 // result: (UGE cmp yes no) 22007 for { 22008 v := b.Control 22009 if v.Op != OpAMD64TESTB { 22010 break 22011 } 22012 v_0 := v.Args[0] 22013 if v_0.Op != OpAMD64SETAE { 22014 break 22015 } 22016 cmp := v_0.Args[0] 22017 v_1 := v.Args[1] 22018 if v_1.Op != OpAMD64SETAE { 22019 break 22020 } 22021 if cmp != v_1.Args[0] { 22022 break 22023 } 22024 yes := b.Succs[0] 22025 no := b.Succs[1] 22026 b.Kind = BlockAMD64UGE 22027 b.SetControl(cmp) 22028 _ = yes 22029 _ = no 22030 return true 22031 } 22032 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 22033 // cond: 22034 // result: (UGT cmp yes no) 22035 for { 22036 v := b.Control 22037 if v.Op != OpAMD64TESTB { 22038 break 22039 } 22040 v_0 := v.Args[0] 22041 if v_0.Op != OpAMD64SETGF { 22042 break 22043 } 22044 cmp := v_0.Args[0] 22045 v_1 := v.Args[1] 22046 if v_1.Op != OpAMD64SETGF { 22047 break 22048 } 22049 if cmp != v_1.Args[0] { 22050 break 22051 } 22052 yes := b.Succs[0] 22053 no := b.Succs[1] 22054 b.Kind = BlockAMD64UGT 22055 
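// The BlockAMD64NE cases that match (NE (TESTB (SETcc cmp) (SETcc cmp)))
// undo the generic boolean lowering above: once the byte feeding the TESTB is
// visibly a SETcc, the block can branch on that comparison's flags directly
// and becomes the corresponding conditional block (LT, LE, ULT, UGT, EQF,
// NEF, ...). Both TESTB operands must be the very same SETcc value; the
// generated matcher checks cmp against v_1.Args[0] before rewriting, and the
// now-unused SETcc/TESTB values are cleaned up by dead-code elimination.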
b.SetControl(cmp) 22056 _ = yes 22057 _ = no 22058 return true 22059 } 22060 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 22061 // cond: 22062 // result: (UGE cmp yes no) 22063 for { 22064 v := b.Control 22065 if v.Op != OpAMD64TESTB { 22066 break 22067 } 22068 v_0 := v.Args[0] 22069 if v_0.Op != OpAMD64SETGEF { 22070 break 22071 } 22072 cmp := v_0.Args[0] 22073 v_1 := v.Args[1] 22074 if v_1.Op != OpAMD64SETGEF { 22075 break 22076 } 22077 if cmp != v_1.Args[0] { 22078 break 22079 } 22080 yes := b.Succs[0] 22081 no := b.Succs[1] 22082 b.Kind = BlockAMD64UGE 22083 b.SetControl(cmp) 22084 _ = yes 22085 _ = no 22086 return true 22087 } 22088 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 22089 // cond: 22090 // result: (EQF cmp yes no) 22091 for { 22092 v := b.Control 22093 if v.Op != OpAMD64TESTB { 22094 break 22095 } 22096 v_0 := v.Args[0] 22097 if v_0.Op != OpAMD64SETEQF { 22098 break 22099 } 22100 cmp := v_0.Args[0] 22101 v_1 := v.Args[1] 22102 if v_1.Op != OpAMD64SETEQF { 22103 break 22104 } 22105 if cmp != v_1.Args[0] { 22106 break 22107 } 22108 yes := b.Succs[0] 22109 no := b.Succs[1] 22110 b.Kind = BlockAMD64EQF 22111 b.SetControl(cmp) 22112 _ = yes 22113 _ = no 22114 return true 22115 } 22116 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 22117 // cond: 22118 // result: (NEF cmp yes no) 22119 for { 22120 v := b.Control 22121 if v.Op != OpAMD64TESTB { 22122 break 22123 } 22124 v_0 := v.Args[0] 22125 if v_0.Op != OpAMD64SETNEF { 22126 break 22127 } 22128 cmp := v_0.Args[0] 22129 v_1 := v.Args[1] 22130 if v_1.Op != OpAMD64SETNEF { 22131 break 22132 } 22133 if cmp != v_1.Args[0] { 22134 break 22135 } 22136 yes := b.Succs[0] 22137 no := b.Succs[1] 22138 b.Kind = BlockAMD64NEF 22139 b.SetControl(cmp) 22140 _ = yes 22141 _ = no 22142 return true 22143 } 22144 // match: (NE (InvertFlags cmp) yes no) 22145 // cond: 22146 // result: (NE cmp yes no) 22147 for { 22148 v := b.Control 22149 if v.Op != OpAMD64InvertFlags { 22150 break 22151 } 22152 cmp := v.Args[0] 22153 yes := b.Succs[0] 22154 no := b.Succs[1] 22155 b.Kind = BlockAMD64NE 22156 b.SetControl(cmp) 22157 _ = yes 22158 _ = no 22159 return true 22160 } 22161 // match: (NE (FlagEQ) yes no) 22162 // cond: 22163 // result: (First nil no yes) 22164 for { 22165 v := b.Control 22166 if v.Op != OpAMD64FlagEQ { 22167 break 22168 } 22169 yes := b.Succs[0] 22170 no := b.Succs[1] 22171 b.Kind = BlockFirst 22172 b.SetControl(nil) 22173 b.swapSuccessors() 22174 _ = no 22175 _ = yes 22176 return true 22177 } 22178 // match: (NE (FlagLT_ULT) yes no) 22179 // cond: 22180 // result: (First nil yes no) 22181 for { 22182 v := b.Control 22183 if v.Op != OpAMD64FlagLT_ULT { 22184 break 22185 } 22186 yes := b.Succs[0] 22187 no := b.Succs[1] 22188 b.Kind = BlockFirst 22189 b.SetControl(nil) 22190 _ = yes 22191 _ = no 22192 return true 22193 } 22194 // match: (NE (FlagLT_UGT) yes no) 22195 // cond: 22196 // result: (First nil yes no) 22197 for { 22198 v := b.Control 22199 if v.Op != OpAMD64FlagLT_UGT { 22200 break 22201 } 22202 yes := b.Succs[0] 22203 no := b.Succs[1] 22204 b.Kind = BlockFirst 22205 b.SetControl(nil) 22206 _ = yes 22207 _ = no 22208 return true 22209 } 22210 // match: (NE (FlagGT_ULT) yes no) 22211 // cond: 22212 // result: (First nil yes no) 22213 for { 22214 v := b.Control 22215 if v.Op != OpAMD64FlagGT_ULT { 22216 break 22217 } 22218 yes := b.Succs[0] 22219 no := b.Succs[1] 22220 b.Kind = BlockFirst 22221 b.SetControl(nil) 22222 _ = yes 22223 _ = no 22224 return true 22225 } 22226 // match: (NE (FlagGT_UGT) yes 
no) 22227 // cond: 22228 // result: (First nil yes no) 22229 for { 22230 v := b.Control 22231 if v.Op != OpAMD64FlagGT_UGT { 22232 break 22233 } 22234 yes := b.Succs[0] 22235 no := b.Succs[1] 22236 b.Kind = BlockFirst 22237 b.SetControl(nil) 22238 _ = yes 22239 _ = no 22240 return true 22241 } 22242 case BlockAMD64UGE: 22243 // match: (UGE (InvertFlags cmp) yes no) 22244 // cond: 22245 // result: (ULE cmp yes no) 22246 for { 22247 v := b.Control 22248 if v.Op != OpAMD64InvertFlags { 22249 break 22250 } 22251 cmp := v.Args[0] 22252 yes := b.Succs[0] 22253 no := b.Succs[1] 22254 b.Kind = BlockAMD64ULE 22255 b.SetControl(cmp) 22256 _ = yes 22257 _ = no 22258 return true 22259 } 22260 // match: (UGE (FlagEQ) yes no) 22261 // cond: 22262 // result: (First nil yes no) 22263 for { 22264 v := b.Control 22265 if v.Op != OpAMD64FlagEQ { 22266 break 22267 } 22268 yes := b.Succs[0] 22269 no := b.Succs[1] 22270 b.Kind = BlockFirst 22271 b.SetControl(nil) 22272 _ = yes 22273 _ = no 22274 return true 22275 } 22276 // match: (UGE (FlagLT_ULT) yes no) 22277 // cond: 22278 // result: (First nil no yes) 22279 for { 22280 v := b.Control 22281 if v.Op != OpAMD64FlagLT_ULT { 22282 break 22283 } 22284 yes := b.Succs[0] 22285 no := b.Succs[1] 22286 b.Kind = BlockFirst 22287 b.SetControl(nil) 22288 b.swapSuccessors() 22289 _ = no 22290 _ = yes 22291 return true 22292 } 22293 // match: (UGE (FlagLT_UGT) yes no) 22294 // cond: 22295 // result: (First nil yes no) 22296 for { 22297 v := b.Control 22298 if v.Op != OpAMD64FlagLT_UGT { 22299 break 22300 } 22301 yes := b.Succs[0] 22302 no := b.Succs[1] 22303 b.Kind = BlockFirst 22304 b.SetControl(nil) 22305 _ = yes 22306 _ = no 22307 return true 22308 } 22309 // match: (UGE (FlagGT_ULT) yes no) 22310 // cond: 22311 // result: (First nil no yes) 22312 for { 22313 v := b.Control 22314 if v.Op != OpAMD64FlagGT_ULT { 22315 break 22316 } 22317 yes := b.Succs[0] 22318 no := b.Succs[1] 22319 b.Kind = BlockFirst 22320 b.SetControl(nil) 22321 b.swapSuccessors() 22322 _ = no 22323 _ = yes 22324 return true 22325 } 22326 // match: (UGE (FlagGT_UGT) yes no) 22327 // cond: 22328 // result: (First nil yes no) 22329 for { 22330 v := b.Control 22331 if v.Op != OpAMD64FlagGT_UGT { 22332 break 22333 } 22334 yes := b.Succs[0] 22335 no := b.Succs[1] 22336 b.Kind = BlockFirst 22337 b.SetControl(nil) 22338 _ = yes 22339 _ = no 22340 return true 22341 } 22342 case BlockAMD64UGT: 22343 // match: (UGT (InvertFlags cmp) yes no) 22344 // cond: 22345 // result: (ULT cmp yes no) 22346 for { 22347 v := b.Control 22348 if v.Op != OpAMD64InvertFlags { 22349 break 22350 } 22351 cmp := v.Args[0] 22352 yes := b.Succs[0] 22353 no := b.Succs[1] 22354 b.Kind = BlockAMD64ULT 22355 b.SetControl(cmp) 22356 _ = yes 22357 _ = no 22358 return true 22359 } 22360 // match: (UGT (FlagEQ) yes no) 22361 // cond: 22362 // result: (First nil no yes) 22363 for { 22364 v := b.Control 22365 if v.Op != OpAMD64FlagEQ { 22366 break 22367 } 22368 yes := b.Succs[0] 22369 no := b.Succs[1] 22370 b.Kind = BlockFirst 22371 b.SetControl(nil) 22372 b.swapSuccessors() 22373 _ = no 22374 _ = yes 22375 return true 22376 } 22377 // match: (UGT (FlagLT_ULT) yes no) 22378 // cond: 22379 // result: (First nil no yes) 22380 for { 22381 v := b.Control 22382 if v.Op != OpAMD64FlagLT_ULT { 22383 break 22384 } 22385 yes := b.Succs[0] 22386 no := b.Succs[1] 22387 b.Kind = BlockFirst 22388 b.SetControl(nil) 22389 b.swapSuccessors() 22390 _ = no 22391 _ = yes 22392 return true 22393 } 22394 // match: (UGT (FlagLT_UGT) yes no) 22395 // cond: 22396 // 
result: (First nil yes no) 22397 for { 22398 v := b.Control 22399 if v.Op != OpAMD64FlagLT_UGT { 22400 break 22401 } 22402 yes := b.Succs[0] 22403 no := b.Succs[1] 22404 b.Kind = BlockFirst 22405 b.SetControl(nil) 22406 _ = yes 22407 _ = no 22408 return true 22409 } 22410 // match: (UGT (FlagGT_ULT) yes no) 22411 // cond: 22412 // result: (First nil no yes) 22413 for { 22414 v := b.Control 22415 if v.Op != OpAMD64FlagGT_ULT { 22416 break 22417 } 22418 yes := b.Succs[0] 22419 no := b.Succs[1] 22420 b.Kind = BlockFirst 22421 b.SetControl(nil) 22422 b.swapSuccessors() 22423 _ = no 22424 _ = yes 22425 return true 22426 } 22427 // match: (UGT (FlagGT_UGT) yes no) 22428 // cond: 22429 // result: (First nil yes no) 22430 for { 22431 v := b.Control 22432 if v.Op != OpAMD64FlagGT_UGT { 22433 break 22434 } 22435 yes := b.Succs[0] 22436 no := b.Succs[1] 22437 b.Kind = BlockFirst 22438 b.SetControl(nil) 22439 _ = yes 22440 _ = no 22441 return true 22442 } 22443 case BlockAMD64ULE: 22444 // match: (ULE (InvertFlags cmp) yes no) 22445 // cond: 22446 // result: (UGE cmp yes no) 22447 for { 22448 v := b.Control 22449 if v.Op != OpAMD64InvertFlags { 22450 break 22451 } 22452 cmp := v.Args[0] 22453 yes := b.Succs[0] 22454 no := b.Succs[1] 22455 b.Kind = BlockAMD64UGE 22456 b.SetControl(cmp) 22457 _ = yes 22458 _ = no 22459 return true 22460 } 22461 // match: (ULE (FlagEQ) yes no) 22462 // cond: 22463 // result: (First nil yes no) 22464 for { 22465 v := b.Control 22466 if v.Op != OpAMD64FlagEQ { 22467 break 22468 } 22469 yes := b.Succs[0] 22470 no := b.Succs[1] 22471 b.Kind = BlockFirst 22472 b.SetControl(nil) 22473 _ = yes 22474 _ = no 22475 return true 22476 } 22477 // match: (ULE (FlagLT_ULT) yes no) 22478 // cond: 22479 // result: (First nil yes no) 22480 for { 22481 v := b.Control 22482 if v.Op != OpAMD64FlagLT_ULT { 22483 break 22484 } 22485 yes := b.Succs[0] 22486 no := b.Succs[1] 22487 b.Kind = BlockFirst 22488 b.SetControl(nil) 22489 _ = yes 22490 _ = no 22491 return true 22492 } 22493 // match: (ULE (FlagLT_UGT) yes no) 22494 // cond: 22495 // result: (First nil no yes) 22496 for { 22497 v := b.Control 22498 if v.Op != OpAMD64FlagLT_UGT { 22499 break 22500 } 22501 yes := b.Succs[0] 22502 no := b.Succs[1] 22503 b.Kind = BlockFirst 22504 b.SetControl(nil) 22505 b.swapSuccessors() 22506 _ = no 22507 _ = yes 22508 return true 22509 } 22510 // match: (ULE (FlagGT_ULT) yes no) 22511 // cond: 22512 // result: (First nil yes no) 22513 for { 22514 v := b.Control 22515 if v.Op != OpAMD64FlagGT_ULT { 22516 break 22517 } 22518 yes := b.Succs[0] 22519 no := b.Succs[1] 22520 b.Kind = BlockFirst 22521 b.SetControl(nil) 22522 _ = yes 22523 _ = no 22524 return true 22525 } 22526 // match: (ULE (FlagGT_UGT) yes no) 22527 // cond: 22528 // result: (First nil no yes) 22529 for { 22530 v := b.Control 22531 if v.Op != OpAMD64FlagGT_UGT { 22532 break 22533 } 22534 yes := b.Succs[0] 22535 no := b.Succs[1] 22536 b.Kind = BlockFirst 22537 b.SetControl(nil) 22538 b.swapSuccessors() 22539 _ = no 22540 _ = yes 22541 return true 22542 } 22543 case BlockAMD64ULT: 22544 // match: (ULT (InvertFlags cmp) yes no) 22545 // cond: 22546 // result: (UGT cmp yes no) 22547 for { 22548 v := b.Control 22549 if v.Op != OpAMD64InvertFlags { 22550 break 22551 } 22552 cmp := v.Args[0] 22553 yes := b.Succs[0] 22554 no := b.Succs[1] 22555 b.Kind = BlockAMD64UGT 22556 b.SetControl(cmp) 22557 _ = yes 22558 _ = no 22559 return true 22560 } 22561 // match: (ULT (FlagEQ) yes no) 22562 // cond: 22563 // result: (First nil no yes) 22564 for { 22565 v := 
b.Control 22566 if v.Op != OpAMD64FlagEQ { 22567 break 22568 } 22569 yes := b.Succs[0] 22570 no := b.Succs[1] 22571 b.Kind = BlockFirst 22572 b.SetControl(nil) 22573 b.swapSuccessors() 22574 _ = no 22575 _ = yes 22576 return true 22577 } 22578 // match: (ULT (FlagLT_ULT) yes no) 22579 // cond: 22580 // result: (First nil yes no) 22581 for { 22582 v := b.Control 22583 if v.Op != OpAMD64FlagLT_ULT { 22584 break 22585 } 22586 yes := b.Succs[0] 22587 no := b.Succs[1] 22588 b.Kind = BlockFirst 22589 b.SetControl(nil) 22590 _ = yes 22591 _ = no 22592 return true 22593 } 22594 // match: (ULT (FlagLT_UGT) yes no) 22595 // cond: 22596 // result: (First nil no yes) 22597 for { 22598 v := b.Control 22599 if v.Op != OpAMD64FlagLT_UGT { 22600 break 22601 } 22602 yes := b.Succs[0] 22603 no := b.Succs[1] 22604 b.Kind = BlockFirst 22605 b.SetControl(nil) 22606 b.swapSuccessors() 22607 _ = no 22608 _ = yes 22609 return true 22610 } 22611 // match: (ULT (FlagGT_ULT) yes no) 22612 // cond: 22613 // result: (First nil yes no) 22614 for { 22615 v := b.Control 22616 if v.Op != OpAMD64FlagGT_ULT { 22617 break 22618 } 22619 yes := b.Succs[0] 22620 no := b.Succs[1] 22621 b.Kind = BlockFirst 22622 b.SetControl(nil) 22623 _ = yes 22624 _ = no 22625 return true 22626 } 22627 // match: (ULT (FlagGT_UGT) yes no) 22628 // cond: 22629 // result: (First nil no yes) 22630 for { 22631 v := b.Control 22632 if v.Op != OpAMD64FlagGT_UGT { 22633 break 22634 } 22635 yes := b.Succs[0] 22636 no := b.Succs[1] 22637 b.Kind = BlockFirst 22638 b.SetControl(nil) 22639 b.swapSuccessors() 22640 _ = no 22641 _ = yes 22642 return true 22643 } 22644 } 22645 return false 22646 }