github.com/rakyll/go@v0.0.0-20170216000551-64c02460d703/src/cmd/compile/internal/ssa/rewriteAMD64.go (about) 1 // autogenerated from gen/AMD64.rules: do not edit! 2 // generated with: cd gen; go run *.go 3 4 package ssa 5 6 import "math" 7 8 var _ = math.MinInt8 // in case not otherwise used 9 func rewriteValueAMD64(v *Value, config *Config) bool { 10 switch v.Op { 11 case OpAMD64ADDL: 12 return rewriteValueAMD64_OpAMD64ADDL(v, config) 13 case OpAMD64ADDLconst: 14 return rewriteValueAMD64_OpAMD64ADDLconst(v, config) 15 case OpAMD64ADDQ: 16 return rewriteValueAMD64_OpAMD64ADDQ(v, config) 17 case OpAMD64ADDQconst: 18 return rewriteValueAMD64_OpAMD64ADDQconst(v, config) 19 case OpAMD64ANDL: 20 return rewriteValueAMD64_OpAMD64ANDL(v, config) 21 case OpAMD64ANDLconst: 22 return rewriteValueAMD64_OpAMD64ANDLconst(v, config) 23 case OpAMD64ANDQ: 24 return rewriteValueAMD64_OpAMD64ANDQ(v, config) 25 case OpAMD64ANDQconst: 26 return rewriteValueAMD64_OpAMD64ANDQconst(v, config) 27 case OpAMD64CMPB: 28 return rewriteValueAMD64_OpAMD64CMPB(v, config) 29 case OpAMD64CMPBconst: 30 return rewriteValueAMD64_OpAMD64CMPBconst(v, config) 31 case OpAMD64CMPL: 32 return rewriteValueAMD64_OpAMD64CMPL(v, config) 33 case OpAMD64CMPLconst: 34 return rewriteValueAMD64_OpAMD64CMPLconst(v, config) 35 case OpAMD64CMPQ: 36 return rewriteValueAMD64_OpAMD64CMPQ(v, config) 37 case OpAMD64CMPQconst: 38 return rewriteValueAMD64_OpAMD64CMPQconst(v, config) 39 case OpAMD64CMPW: 40 return rewriteValueAMD64_OpAMD64CMPW(v, config) 41 case OpAMD64CMPWconst: 42 return rewriteValueAMD64_OpAMD64CMPWconst(v, config) 43 case OpAMD64CMPXCHGLlock: 44 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config) 45 case OpAMD64CMPXCHGQlock: 46 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config) 47 case OpAMD64LEAL: 48 return rewriteValueAMD64_OpAMD64LEAL(v, config) 49 case OpAMD64LEAQ: 50 return rewriteValueAMD64_OpAMD64LEAQ(v, config) 51 case OpAMD64LEAQ1: 52 return rewriteValueAMD64_OpAMD64LEAQ1(v, config) 53 case OpAMD64LEAQ2: 54 return rewriteValueAMD64_OpAMD64LEAQ2(v, config) 55 case OpAMD64LEAQ4: 56 return rewriteValueAMD64_OpAMD64LEAQ4(v, config) 57 case OpAMD64LEAQ8: 58 return rewriteValueAMD64_OpAMD64LEAQ8(v, config) 59 case OpAMD64MOVBQSX: 60 return rewriteValueAMD64_OpAMD64MOVBQSX(v, config) 61 case OpAMD64MOVBQSXload: 62 return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config) 63 case OpAMD64MOVBQZX: 64 return rewriteValueAMD64_OpAMD64MOVBQZX(v, config) 65 case OpAMD64MOVBload: 66 return rewriteValueAMD64_OpAMD64MOVBload(v, config) 67 case OpAMD64MOVBloadidx1: 68 return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config) 69 case OpAMD64MOVBstore: 70 return rewriteValueAMD64_OpAMD64MOVBstore(v, config) 71 case OpAMD64MOVBstoreconst: 72 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config) 73 case OpAMD64MOVBstoreconstidx1: 74 return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config) 75 case OpAMD64MOVBstoreidx1: 76 return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config) 77 case OpAMD64MOVLQSX: 78 return rewriteValueAMD64_OpAMD64MOVLQSX(v, config) 79 case OpAMD64MOVLQSXload: 80 return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config) 81 case OpAMD64MOVLQZX: 82 return rewriteValueAMD64_OpAMD64MOVLQZX(v, config) 83 case OpAMD64MOVLatomicload: 84 return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config) 85 case OpAMD64MOVLload: 86 return rewriteValueAMD64_OpAMD64MOVLload(v, config) 87 case OpAMD64MOVLloadidx1: 88 return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config) 89 case OpAMD64MOVLloadidx4: 90 return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config) 91 case OpAMD64MOVLstore: 92 return rewriteValueAMD64_OpAMD64MOVLstore(v, config) 93 case OpAMD64MOVLstoreconst: 94 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config) 95 case OpAMD64MOVLstoreconstidx1: 96 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config) 97 case OpAMD64MOVLstoreconstidx4: 98 return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config) 99 case OpAMD64MOVLstoreidx1: 100 return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config) 101 case OpAMD64MOVLstoreidx4: 102 return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config) 103 case OpAMD64MOVOload: 104 return rewriteValueAMD64_OpAMD64MOVOload(v, config) 105 case OpAMD64MOVOstore: 106 return rewriteValueAMD64_OpAMD64MOVOstore(v, config) 107 case OpAMD64MOVQatomicload: 108 return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config) 109 case OpAMD64MOVQload: 110 return rewriteValueAMD64_OpAMD64MOVQload(v, config) 111 case OpAMD64MOVQloadidx1: 112 return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config) 113 case OpAMD64MOVQloadidx8: 114 return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config) 115 case OpAMD64MOVQstore: 116 return rewriteValueAMD64_OpAMD64MOVQstore(v, config) 117 case OpAMD64MOVQstoreconst: 118 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config) 119 case OpAMD64MOVQstoreconstidx1: 120 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config) 121 case OpAMD64MOVQstoreconstidx8: 122 return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config) 123 case OpAMD64MOVQstoreidx1: 124 return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config) 125 case OpAMD64MOVQstoreidx8: 126 return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config) 127 case OpAMD64MOVSDload: 128 return rewriteValueAMD64_OpAMD64MOVSDload(v, config) 129 case OpAMD64MOVSDloadidx1: 130 return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config) 131 case OpAMD64MOVSDloadidx8: 132 return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config) 133 case OpAMD64MOVSDstore: 134 return rewriteValueAMD64_OpAMD64MOVSDstore(v, config) 135 case OpAMD64MOVSDstoreidx1: 136 return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config) 137 case OpAMD64MOVSDstoreidx8: 138 return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config) 139 case OpAMD64MOVSSload: 140 return rewriteValueAMD64_OpAMD64MOVSSload(v, config) 141 case OpAMD64MOVSSloadidx1: 142 return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config) 143 case OpAMD64MOVSSloadidx4: 144 return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config) 145 case OpAMD64MOVSSstore: 146 return rewriteValueAMD64_OpAMD64MOVSSstore(v, config) 147 case OpAMD64MOVSSstoreidx1: 148 return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config) 149 case OpAMD64MOVSSstoreidx4: 150 return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config) 151 case OpAMD64MOVWQSX: 152 return rewriteValueAMD64_OpAMD64MOVWQSX(v, config) 153 case OpAMD64MOVWQSXload: 154 return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config) 155 case OpAMD64MOVWQZX: 156 return rewriteValueAMD64_OpAMD64MOVWQZX(v, config) 157 case OpAMD64MOVWload: 158 return rewriteValueAMD64_OpAMD64MOVWload(v, config) 159 case OpAMD64MOVWloadidx1: 160 return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config) 161 case OpAMD64MOVWloadidx2: 162 return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config) 163 case OpAMD64MOVWstore: 164 return rewriteValueAMD64_OpAMD64MOVWstore(v, config) 165 case OpAMD64MOVWstoreconst: 166 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config) 167 case OpAMD64MOVWstoreconstidx1: 168 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config) 169 case OpAMD64MOVWstoreconstidx2: 170 return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config) 171 case OpAMD64MOVWstoreidx1: 172 return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config) 173 case OpAMD64MOVWstoreidx2: 174 return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config) 175 case OpAMD64MULL: 176 return rewriteValueAMD64_OpAMD64MULL(v, config) 177 case OpAMD64MULLconst: 178 return rewriteValueAMD64_OpAMD64MULLconst(v, config) 179 case OpAMD64MULQ: 180 return rewriteValueAMD64_OpAMD64MULQ(v, config) 181 case OpAMD64MULQconst: 182 return rewriteValueAMD64_OpAMD64MULQconst(v, config) 183 case OpAMD64NEGL: 184 return rewriteValueAMD64_OpAMD64NEGL(v, config) 185 case OpAMD64NEGQ: 186 return rewriteValueAMD64_OpAMD64NEGQ(v, config) 187 case OpAMD64NOTL: 188 return rewriteValueAMD64_OpAMD64NOTL(v, config) 189 case OpAMD64NOTQ: 190 return rewriteValueAMD64_OpAMD64NOTQ(v, config) 191 case OpAMD64ORL: 192 return rewriteValueAMD64_OpAMD64ORL(v, config) 193 case OpAMD64ORLconst: 194 return rewriteValueAMD64_OpAMD64ORLconst(v, config) 195 case OpAMD64ORQ: 196 return rewriteValueAMD64_OpAMD64ORQ(v, config) 197 case OpAMD64ORQconst: 198 return rewriteValueAMD64_OpAMD64ORQconst(v, config) 199 case OpAMD64ROLBconst: 200 return rewriteValueAMD64_OpAMD64ROLBconst(v, config) 201 case OpAMD64ROLLconst: 202 return rewriteValueAMD64_OpAMD64ROLLconst(v, config) 203 case OpAMD64ROLQconst: 204 return rewriteValueAMD64_OpAMD64ROLQconst(v, config) 205 case OpAMD64ROLWconst: 206 return rewriteValueAMD64_OpAMD64ROLWconst(v, config) 207 case OpAMD64SARB: 208 return rewriteValueAMD64_OpAMD64SARB(v, config) 209 case OpAMD64SARBconst: 210 return rewriteValueAMD64_OpAMD64SARBconst(v, config) 211 case OpAMD64SARL: 212 return rewriteValueAMD64_OpAMD64SARL(v, config) 213 case OpAMD64SARLconst: 214 return rewriteValueAMD64_OpAMD64SARLconst(v, config) 215 case OpAMD64SARQ: 216 return rewriteValueAMD64_OpAMD64SARQ(v, config) 217 case OpAMD64SARQconst: 218 return rewriteValueAMD64_OpAMD64SARQconst(v, config) 219 case OpAMD64SARW: 220 return rewriteValueAMD64_OpAMD64SARW(v, config) 221 case OpAMD64SARWconst: 222 return rewriteValueAMD64_OpAMD64SARWconst(v, config) 223 case OpAMD64SBBLcarrymask: 224 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config) 225 case OpAMD64SBBQcarrymask: 226 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config) 227 case OpAMD64SETA: 228 return rewriteValueAMD64_OpAMD64SETA(v, config) 229 case OpAMD64SETAE: 230 return rewriteValueAMD64_OpAMD64SETAE(v, config) 231 case OpAMD64SETB: 232 return rewriteValueAMD64_OpAMD64SETB(v, config) 233 case OpAMD64SETBE: 234 return rewriteValueAMD64_OpAMD64SETBE(v, config) 235 case OpAMD64SETEQ: 236 return rewriteValueAMD64_OpAMD64SETEQ(v, config) 237 case OpAMD64SETG: 238 return rewriteValueAMD64_OpAMD64SETG(v, config) 239 case OpAMD64SETGE: 240 return rewriteValueAMD64_OpAMD64SETGE(v, config) 241 case OpAMD64SETL: 242 return rewriteValueAMD64_OpAMD64SETL(v, config) 243 case OpAMD64SETLE: 244 return rewriteValueAMD64_OpAMD64SETLE(v, config) 245 case OpAMD64SETNE: 246 return rewriteValueAMD64_OpAMD64SETNE(v, config) 247 case OpAMD64SHLL: 248 return rewriteValueAMD64_OpAMD64SHLL(v, config) 249 case OpAMD64SHLLconst: 250 return rewriteValueAMD64_OpAMD64SHLLconst(v, config) 251 case OpAMD64SHLQ: 252 return rewriteValueAMD64_OpAMD64SHLQ(v, config) 253 case OpAMD64SHLQconst: 254 return rewriteValueAMD64_OpAMD64SHLQconst(v, config) 255 case OpAMD64SHRB: 256 return rewriteValueAMD64_OpAMD64SHRB(v, config) 257 case OpAMD64SHRBconst: 258 return rewriteValueAMD64_OpAMD64SHRBconst(v, config) 259 case OpAMD64SHRL: 260 return rewriteValueAMD64_OpAMD64SHRL(v, config) 261 case OpAMD64SHRLconst: 262 return rewriteValueAMD64_OpAMD64SHRLconst(v, config) 263 case OpAMD64SHRQ: 264 return rewriteValueAMD64_OpAMD64SHRQ(v, config) 265 case OpAMD64SHRQconst: 266 return rewriteValueAMD64_OpAMD64SHRQconst(v, config) 267 case OpAMD64SHRW: 268 return rewriteValueAMD64_OpAMD64SHRW(v, config) 269 case OpAMD64SHRWconst: 270 return rewriteValueAMD64_OpAMD64SHRWconst(v, config) 271 case OpAMD64SUBL: 272 return rewriteValueAMD64_OpAMD64SUBL(v, config) 273 case OpAMD64SUBLconst: 274 return rewriteValueAMD64_OpAMD64SUBLconst(v, config) 275 case OpAMD64SUBQ: 276 return rewriteValueAMD64_OpAMD64SUBQ(v, config) 277 case OpAMD64SUBQconst: 278 return rewriteValueAMD64_OpAMD64SUBQconst(v, config) 279 case OpAMD64XADDLlock: 280 return rewriteValueAMD64_OpAMD64XADDLlock(v, config) 281 case OpAMD64XADDQlock: 282 return rewriteValueAMD64_OpAMD64XADDQlock(v, config) 283 case OpAMD64XCHGL: 284 return rewriteValueAMD64_OpAMD64XCHGL(v, config) 285 case OpAMD64XCHGQ: 286 return rewriteValueAMD64_OpAMD64XCHGQ(v, config) 287 case OpAMD64XORL: 288 return rewriteValueAMD64_OpAMD64XORL(v, config) 289 case OpAMD64XORLconst: 290 return rewriteValueAMD64_OpAMD64XORLconst(v, config) 291 case OpAMD64XORQ: 292 return rewriteValueAMD64_OpAMD64XORQ(v, config) 293 case OpAMD64XORQconst: 294 return rewriteValueAMD64_OpAMD64XORQconst(v, config) 295 case OpAdd16: 296 return rewriteValueAMD64_OpAdd16(v, config) 297 case OpAdd32: 298 return rewriteValueAMD64_OpAdd32(v, config) 299 case OpAdd32F: 300 return rewriteValueAMD64_OpAdd32F(v, config) 301 case OpAdd64: 302 return rewriteValueAMD64_OpAdd64(v, config) 303 case OpAdd64F: 304 return rewriteValueAMD64_OpAdd64F(v, config) 305 case OpAdd8: 306 return rewriteValueAMD64_OpAdd8(v, config) 307 case OpAddPtr: 308 return rewriteValueAMD64_OpAddPtr(v, config) 309 case OpAddr: 310 return rewriteValueAMD64_OpAddr(v, config) 311 case OpAnd16: 312 return rewriteValueAMD64_OpAnd16(v, config) 313 case OpAnd32: 314 return rewriteValueAMD64_OpAnd32(v, config) 315 case OpAnd64: 316 return rewriteValueAMD64_OpAnd64(v, config) 317 case OpAnd8: 318 return rewriteValueAMD64_OpAnd8(v, config) 319 case OpAndB: 320 return rewriteValueAMD64_OpAndB(v, config) 321 case OpAtomicAdd32: 322 return rewriteValueAMD64_OpAtomicAdd32(v, config) 323 case OpAtomicAdd64: 324 return rewriteValueAMD64_OpAtomicAdd64(v, config) 325 case OpAtomicAnd8: 326 return rewriteValueAMD64_OpAtomicAnd8(v, config) 327 case OpAtomicCompareAndSwap32: 328 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config) 329 case OpAtomicCompareAndSwap64: 330 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config) 331 case OpAtomicExchange32: 332 return rewriteValueAMD64_OpAtomicExchange32(v, config) 333 case OpAtomicExchange64: 334 return rewriteValueAMD64_OpAtomicExchange64(v, config) 335 case OpAtomicLoad32: 336 return rewriteValueAMD64_OpAtomicLoad32(v, config) 337 case OpAtomicLoad64: 338 return rewriteValueAMD64_OpAtomicLoad64(v, config) 339 case OpAtomicLoadPtr: 340 return rewriteValueAMD64_OpAtomicLoadPtr(v, config) 341 case OpAtomicOr8: 342 return rewriteValueAMD64_OpAtomicOr8(v, config) 343 case OpAtomicStore32: 344 return rewriteValueAMD64_OpAtomicStore32(v, config) 345 case OpAtomicStore64: 346 return rewriteValueAMD64_OpAtomicStore64(v, config) 347 case OpAtomicStorePtrNoWB: 348 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config) 349 case OpAvg64u: 350 return rewriteValueAMD64_OpAvg64u(v, config) 351 case OpBswap32: 352 return rewriteValueAMD64_OpBswap32(v, config) 353 case OpBswap64: 354 return rewriteValueAMD64_OpBswap64(v, config) 355 case OpClosureCall: 356 return rewriteValueAMD64_OpClosureCall(v, config) 357 case OpCom16: 358 return rewriteValueAMD64_OpCom16(v, config) 359 case OpCom32: 360 return rewriteValueAMD64_OpCom32(v, config) 361 case OpCom64: 362 return rewriteValueAMD64_OpCom64(v, config) 363 case OpCom8: 364 return rewriteValueAMD64_OpCom8(v, config) 365 case OpConst16: 366 return rewriteValueAMD64_OpConst16(v, config) 367 case OpConst32: 368 return rewriteValueAMD64_OpConst32(v, config) 369 case OpConst32F: 370 return rewriteValueAMD64_OpConst32F(v, config) 371 case OpConst64: 372 return rewriteValueAMD64_OpConst64(v, config) 373 case OpConst64F: 374 return rewriteValueAMD64_OpConst64F(v, config) 375 case OpConst8: 376 return rewriteValueAMD64_OpConst8(v, config) 377 case OpConstBool: 378 return rewriteValueAMD64_OpConstBool(v, config) 379 case OpConstNil: 380 return rewriteValueAMD64_OpConstNil(v, config) 381 case OpConvert: 382 return rewriteValueAMD64_OpConvert(v, config) 383 case OpCtz32: 384 return rewriteValueAMD64_OpCtz32(v, config) 385 case OpCtz64: 386 return rewriteValueAMD64_OpCtz64(v, config) 387 case OpCvt32Fto32: 388 return rewriteValueAMD64_OpCvt32Fto32(v, config) 389 case OpCvt32Fto64: 390 return rewriteValueAMD64_OpCvt32Fto64(v, config) 391 case OpCvt32Fto64F: 392 return rewriteValueAMD64_OpCvt32Fto64F(v, config) 393 case OpCvt32to32F: 394 return rewriteValueAMD64_OpCvt32to32F(v, config) 395 case OpCvt32to64F: 396 return rewriteValueAMD64_OpCvt32to64F(v, config) 397 case OpCvt64Fto32: 398 return rewriteValueAMD64_OpCvt64Fto32(v, config) 399 case OpCvt64Fto32F: 400 return rewriteValueAMD64_OpCvt64Fto32F(v, config) 401 case OpCvt64Fto64: 402 return rewriteValueAMD64_OpCvt64Fto64(v, config) 403 case OpCvt64to32F: 404 return rewriteValueAMD64_OpCvt64to32F(v, config) 405 case OpCvt64to64F: 406 return rewriteValueAMD64_OpCvt64to64F(v, config) 407 case OpDeferCall: 408 return rewriteValueAMD64_OpDeferCall(v, config) 409 case OpDiv128u: 410 return rewriteValueAMD64_OpDiv128u(v, config) 411 case OpDiv16: 412 return rewriteValueAMD64_OpDiv16(v, config) 413 case OpDiv16u: 414 return rewriteValueAMD64_OpDiv16u(v, config) 415 case OpDiv32: 416 return rewriteValueAMD64_OpDiv32(v, config) 417 case OpDiv32F: 418 return rewriteValueAMD64_OpDiv32F(v, config) 419 case OpDiv32u: 420 return rewriteValueAMD64_OpDiv32u(v, config) 421 case OpDiv64: 422 return rewriteValueAMD64_OpDiv64(v, config) 423 case OpDiv64F: 424 return rewriteValueAMD64_OpDiv64F(v, config) 425 case OpDiv64u: 426 return rewriteValueAMD64_OpDiv64u(v, config) 427 case OpDiv8: 428 return rewriteValueAMD64_OpDiv8(v, config) 429 case OpDiv8u: 430 return rewriteValueAMD64_OpDiv8u(v, config) 431 case OpEq16: 432 return rewriteValueAMD64_OpEq16(v, config) 433 case OpEq32: 434 return rewriteValueAMD64_OpEq32(v, config) 435 case OpEq32F: 436 return rewriteValueAMD64_OpEq32F(v, config) 437 case OpEq64: 438 return rewriteValueAMD64_OpEq64(v, config) 439 case OpEq64F: 440 return rewriteValueAMD64_OpEq64F(v, config) 441 case OpEq8: 442 return rewriteValueAMD64_OpEq8(v, config) 443 case OpEqB: 444 return rewriteValueAMD64_OpEqB(v, config) 445 case OpEqPtr: 446 return rewriteValueAMD64_OpEqPtr(v, config) 447 case OpGeq16: 448 return rewriteValueAMD64_OpGeq16(v, config) 449 case OpGeq16U: 450 return rewriteValueAMD64_OpGeq16U(v, config) 451 case OpGeq32: 452 return rewriteValueAMD64_OpGeq32(v, config) 453 case OpGeq32F: 454 return rewriteValueAMD64_OpGeq32F(v, config) 455 case OpGeq32U: 456 return rewriteValueAMD64_OpGeq32U(v, config) 457 case OpGeq64: 458 return rewriteValueAMD64_OpGeq64(v, config) 459 case OpGeq64F: 460 return rewriteValueAMD64_OpGeq64F(v, config) 461 case OpGeq64U: 462 return rewriteValueAMD64_OpGeq64U(v, config) 463 case OpGeq8: 464 return rewriteValueAMD64_OpGeq8(v, config) 465 case OpGeq8U: 466 return rewriteValueAMD64_OpGeq8U(v, config) 467 case OpGetClosurePtr: 468 return rewriteValueAMD64_OpGetClosurePtr(v, config) 469 case OpGetG: 470 return rewriteValueAMD64_OpGetG(v, config) 471 case OpGoCall: 472 return rewriteValueAMD64_OpGoCall(v, config) 473 case OpGreater16: 474 return rewriteValueAMD64_OpGreater16(v, config) 475 case OpGreater16U: 476 return rewriteValueAMD64_OpGreater16U(v, config) 477 case OpGreater32: 478 return rewriteValueAMD64_OpGreater32(v, config) 479 case OpGreater32F: 480 return rewriteValueAMD64_OpGreater32F(v, config) 481 case OpGreater32U: 482 return rewriteValueAMD64_OpGreater32U(v, config) 483 case OpGreater64: 484 return rewriteValueAMD64_OpGreater64(v, config) 485 case OpGreater64F: 486 return rewriteValueAMD64_OpGreater64F(v, config) 487 case OpGreater64U: 488 return rewriteValueAMD64_OpGreater64U(v, config) 489 case OpGreater8: 490 return rewriteValueAMD64_OpGreater8(v, config) 491 case OpGreater8U: 492 return rewriteValueAMD64_OpGreater8U(v, config) 493 case OpHmul16: 494 return rewriteValueAMD64_OpHmul16(v, config) 495 case OpHmul16u: 496 return rewriteValueAMD64_OpHmul16u(v, config) 497 case OpHmul32: 498 return rewriteValueAMD64_OpHmul32(v, config) 499 case OpHmul32u: 500 return rewriteValueAMD64_OpHmul32u(v, config) 501 case OpHmul64: 502 return rewriteValueAMD64_OpHmul64(v, config) 503 case OpHmul64u: 504 return rewriteValueAMD64_OpHmul64u(v, config) 505 case OpHmul8: 506 return rewriteValueAMD64_OpHmul8(v, config) 507 case OpHmul8u: 508 return rewriteValueAMD64_OpHmul8u(v, config) 509 case OpInt64Hi: 510 return rewriteValueAMD64_OpInt64Hi(v, config) 511 case OpInterCall: 512 return rewriteValueAMD64_OpInterCall(v, config) 513 case OpIsInBounds: 514 return rewriteValueAMD64_OpIsInBounds(v, config) 515 case OpIsNonNil: 516 return rewriteValueAMD64_OpIsNonNil(v, config) 517 case OpIsSliceInBounds: 518 return rewriteValueAMD64_OpIsSliceInBounds(v, config) 519 case OpLeq16: 520 return rewriteValueAMD64_OpLeq16(v, config) 521 case OpLeq16U: 522 return rewriteValueAMD64_OpLeq16U(v, config) 523 case OpLeq32: 524 return rewriteValueAMD64_OpLeq32(v, config) 525 case OpLeq32F: 526 return rewriteValueAMD64_OpLeq32F(v, config) 527 case OpLeq32U: 528 return rewriteValueAMD64_OpLeq32U(v, config) 529 case OpLeq64: 530 return rewriteValueAMD64_OpLeq64(v, config) 531 case OpLeq64F: 532 return rewriteValueAMD64_OpLeq64F(v, config) 533 case OpLeq64U: 534 return rewriteValueAMD64_OpLeq64U(v, config) 535 case OpLeq8: 536 return rewriteValueAMD64_OpLeq8(v, config) 537 case OpLeq8U: 538 return rewriteValueAMD64_OpLeq8U(v, config) 539 case OpLess16: 540 return rewriteValueAMD64_OpLess16(v, config) 541 case OpLess16U: 542 return rewriteValueAMD64_OpLess16U(v, config) 543 case OpLess32: 544 return rewriteValueAMD64_OpLess32(v, config) 545 case OpLess32F: 546 return rewriteValueAMD64_OpLess32F(v, config) 547 case OpLess32U: 548 return rewriteValueAMD64_OpLess32U(v, config) 549 case OpLess64: 550 return rewriteValueAMD64_OpLess64(v, config) 551 case OpLess64F: 552 return rewriteValueAMD64_OpLess64F(v, config) 553 case OpLess64U: 554 return rewriteValueAMD64_OpLess64U(v, config) 555 case OpLess8: 556 return rewriteValueAMD64_OpLess8(v, config) 557 case OpLess8U: 558 return rewriteValueAMD64_OpLess8U(v, config) 559 case OpLoad: 560 return rewriteValueAMD64_OpLoad(v, config) 561 case OpLsh16x16: 562 return rewriteValueAMD64_OpLsh16x16(v, config) 563 case OpLsh16x32: 564 return rewriteValueAMD64_OpLsh16x32(v, config) 565 case OpLsh16x64: 566 return rewriteValueAMD64_OpLsh16x64(v, config) 567 case OpLsh16x8: 568 return rewriteValueAMD64_OpLsh16x8(v, config) 569 case OpLsh32x16: 570 return rewriteValueAMD64_OpLsh32x16(v, config) 571 case OpLsh32x32: 572 return rewriteValueAMD64_OpLsh32x32(v, config) 573 case OpLsh32x64: 574 return rewriteValueAMD64_OpLsh32x64(v, config) 575 case OpLsh32x8: 576 return rewriteValueAMD64_OpLsh32x8(v, config) 577 case OpLsh64x16: 578 return rewriteValueAMD64_OpLsh64x16(v, config) 579 case OpLsh64x32: 580 return rewriteValueAMD64_OpLsh64x32(v, config) 581 case OpLsh64x64: 582 return rewriteValueAMD64_OpLsh64x64(v, config) 583 case OpLsh64x8: 584 return rewriteValueAMD64_OpLsh64x8(v, config) 585 case OpLsh8x16: 586 return rewriteValueAMD64_OpLsh8x16(v, config) 587 case OpLsh8x32: 588 return rewriteValueAMD64_OpLsh8x32(v, config) 589 case OpLsh8x64: 590 return rewriteValueAMD64_OpLsh8x64(v, config) 591 case OpLsh8x8: 592 return rewriteValueAMD64_OpLsh8x8(v, config) 593 case OpMod16: 594 return rewriteValueAMD64_OpMod16(v, config) 595 case OpMod16u: 596 return rewriteValueAMD64_OpMod16u(v, config) 597 case OpMod32: 598 return rewriteValueAMD64_OpMod32(v, config) 599 case OpMod32u: 600 return rewriteValueAMD64_OpMod32u(v, config) 601 case OpMod64: 602 return rewriteValueAMD64_OpMod64(v, config) 603 case OpMod64u: 604 return rewriteValueAMD64_OpMod64u(v, config) 605 case OpMod8: 606 return rewriteValueAMD64_OpMod8(v, config) 607 case OpMod8u: 608 return rewriteValueAMD64_OpMod8u(v, config) 609 case OpMove: 610 return rewriteValueAMD64_OpMove(v, config) 611 case OpMul16: 612 return rewriteValueAMD64_OpMul16(v, config) 613 case OpMul32: 614 return rewriteValueAMD64_OpMul32(v, config) 615 case OpMul32F: 616 return rewriteValueAMD64_OpMul32F(v, config) 617 case OpMul64: 618 return rewriteValueAMD64_OpMul64(v, config) 619 case OpMul64F: 620 return rewriteValueAMD64_OpMul64F(v, config) 621 case OpMul64uhilo: 622 return rewriteValueAMD64_OpMul64uhilo(v, config) 623 case OpMul8: 624 return rewriteValueAMD64_OpMul8(v, config) 625 case OpNeg16: 626 return rewriteValueAMD64_OpNeg16(v, config) 627 case OpNeg32: 628 return rewriteValueAMD64_OpNeg32(v, config) 629 case OpNeg32F: 630 return rewriteValueAMD64_OpNeg32F(v, config) 631 case OpNeg64: 632 return rewriteValueAMD64_OpNeg64(v, config) 633 case OpNeg64F: 634 return rewriteValueAMD64_OpNeg64F(v, config) 635 case OpNeg8: 636 return rewriteValueAMD64_OpNeg8(v, config) 637 case OpNeq16: 638 return rewriteValueAMD64_OpNeq16(v, config) 639 case OpNeq32: 640 return rewriteValueAMD64_OpNeq32(v, config) 641 case OpNeq32F: 642 return rewriteValueAMD64_OpNeq32F(v, config) 643 case OpNeq64: 644 return rewriteValueAMD64_OpNeq64(v, config) 645 case OpNeq64F: 646 return rewriteValueAMD64_OpNeq64F(v, config) 647 case OpNeq8: 648 return rewriteValueAMD64_OpNeq8(v, config) 649 case OpNeqB: 650 return rewriteValueAMD64_OpNeqB(v, config) 651 case OpNeqPtr: 652 return rewriteValueAMD64_OpNeqPtr(v, config) 653 case OpNilCheck: 654 return rewriteValueAMD64_OpNilCheck(v, config) 655 case OpNot: 656 return rewriteValueAMD64_OpNot(v, config) 657 case OpOffPtr: 658 return rewriteValueAMD64_OpOffPtr(v, config) 659 case OpOr16: 660 return rewriteValueAMD64_OpOr16(v, config) 661 case OpOr32: 662 return rewriteValueAMD64_OpOr32(v, config) 663 case OpOr64: 664 return rewriteValueAMD64_OpOr64(v, config) 665 case OpOr8: 666 return rewriteValueAMD64_OpOr8(v, config) 667 case OpOrB: 668 return rewriteValueAMD64_OpOrB(v, config) 669 case OpRsh16Ux16: 670 return rewriteValueAMD64_OpRsh16Ux16(v, config) 671 case OpRsh16Ux32: 672 return rewriteValueAMD64_OpRsh16Ux32(v, config) 673 case OpRsh16Ux64: 674 return rewriteValueAMD64_OpRsh16Ux64(v, config) 675 case OpRsh16Ux8: 676 return rewriteValueAMD64_OpRsh16Ux8(v, config) 677 case OpRsh16x16: 678 return rewriteValueAMD64_OpRsh16x16(v, config) 679 case OpRsh16x32: 680 return rewriteValueAMD64_OpRsh16x32(v, config) 681 case OpRsh16x64: 682 return rewriteValueAMD64_OpRsh16x64(v, config) 683 case OpRsh16x8: 684 return rewriteValueAMD64_OpRsh16x8(v, config) 685 case OpRsh32Ux16: 686 return rewriteValueAMD64_OpRsh32Ux16(v, config) 687 case OpRsh32Ux32: 688 return rewriteValueAMD64_OpRsh32Ux32(v, config) 689 case OpRsh32Ux64: 690 return rewriteValueAMD64_OpRsh32Ux64(v, config) 691 case OpRsh32Ux8: 692 return rewriteValueAMD64_OpRsh32Ux8(v, config) 693 case OpRsh32x16: 694 return rewriteValueAMD64_OpRsh32x16(v, config) 695 case OpRsh32x32: 696 return rewriteValueAMD64_OpRsh32x32(v, config) 697 case OpRsh32x64: 698 return rewriteValueAMD64_OpRsh32x64(v, config) 699 case OpRsh32x8: 700 return rewriteValueAMD64_OpRsh32x8(v, config) 701 case OpRsh64Ux16: 702 return rewriteValueAMD64_OpRsh64Ux16(v, config) 703 case OpRsh64Ux32: 704 return rewriteValueAMD64_OpRsh64Ux32(v, config) 705 case OpRsh64Ux64: 706 return rewriteValueAMD64_OpRsh64Ux64(v, config) 707 case OpRsh64Ux8: 708 return rewriteValueAMD64_OpRsh64Ux8(v, config) 709 case OpRsh64x16: 710 return rewriteValueAMD64_OpRsh64x16(v, config) 711 case OpRsh64x32: 712 return rewriteValueAMD64_OpRsh64x32(v, config) 713 case OpRsh64x64: 714 return rewriteValueAMD64_OpRsh64x64(v, config) 715 case OpRsh64x8: 716 return rewriteValueAMD64_OpRsh64x8(v, config) 717 case OpRsh8Ux16: 718 return rewriteValueAMD64_OpRsh8Ux16(v, config) 719 case OpRsh8Ux32: 720 return rewriteValueAMD64_OpRsh8Ux32(v, config) 721 case OpRsh8Ux64: 722 return rewriteValueAMD64_OpRsh8Ux64(v, config) 723 case OpRsh8Ux8: 724 return rewriteValueAMD64_OpRsh8Ux8(v, config) 725 case OpRsh8x16: 726 return rewriteValueAMD64_OpRsh8x16(v, config) 727 case OpRsh8x32: 728 return rewriteValueAMD64_OpRsh8x32(v, config) 729 case OpRsh8x64: 730 return rewriteValueAMD64_OpRsh8x64(v, config) 731 case OpRsh8x8: 732 return rewriteValueAMD64_OpRsh8x8(v, config) 733 case OpSelect0: 734 return rewriteValueAMD64_OpSelect0(v, config) 735 case OpSelect1: 736 return rewriteValueAMD64_OpSelect1(v, config) 737 case OpSignExt16to32: 738 return rewriteValueAMD64_OpSignExt16to32(v, config) 739 case OpSignExt16to64: 740 return rewriteValueAMD64_OpSignExt16to64(v, config) 741 case OpSignExt32to64: 742 return rewriteValueAMD64_OpSignExt32to64(v, config) 743 case OpSignExt8to16: 744 return rewriteValueAMD64_OpSignExt8to16(v, config) 745 case OpSignExt8to32: 746 return rewriteValueAMD64_OpSignExt8to32(v, config) 747 case OpSignExt8to64: 748 return rewriteValueAMD64_OpSignExt8to64(v, config) 749 case OpSlicemask: 750 return rewriteValueAMD64_OpSlicemask(v, config) 751 case OpSqrt: 752 return rewriteValueAMD64_OpSqrt(v, config) 753 case OpStaticCall: 754 return rewriteValueAMD64_OpStaticCall(v, config) 755 case OpStore: 756 return rewriteValueAMD64_OpStore(v, config) 757 case OpSub16: 758 return rewriteValueAMD64_OpSub16(v, config) 759 case OpSub32: 760 return rewriteValueAMD64_OpSub32(v, config) 761 case OpSub32F: 762 return rewriteValueAMD64_OpSub32F(v, config) 763 case OpSub64: 764 return rewriteValueAMD64_OpSub64(v, config) 765 case OpSub64F: 766 return rewriteValueAMD64_OpSub64F(v, config) 767 case OpSub8: 768 return rewriteValueAMD64_OpSub8(v, config) 769 case OpSubPtr: 770 return rewriteValueAMD64_OpSubPtr(v, config) 771 case OpTrunc16to8: 772 return rewriteValueAMD64_OpTrunc16to8(v, config) 773 case OpTrunc32to16: 774 return rewriteValueAMD64_OpTrunc32to16(v, config) 775 case OpTrunc32to8: 776 return rewriteValueAMD64_OpTrunc32to8(v, config) 777 case OpTrunc64to16: 778 return rewriteValueAMD64_OpTrunc64to16(v, config) 779 case OpTrunc64to32: 780 return rewriteValueAMD64_OpTrunc64to32(v, config) 781 case OpTrunc64to8: 782 return rewriteValueAMD64_OpTrunc64to8(v, config) 783 case OpXor16: 784 return rewriteValueAMD64_OpXor16(v, config) 785 case OpXor32: 786 return rewriteValueAMD64_OpXor32(v, config) 787 case OpXor64: 788 return rewriteValueAMD64_OpXor64(v, config) 789 case OpXor8: 790 return rewriteValueAMD64_OpXor8(v, config) 791 case OpZero: 792 return rewriteValueAMD64_OpZero(v, config) 793 case OpZeroExt16to32: 794 return rewriteValueAMD64_OpZeroExt16to32(v, config) 795 case OpZeroExt16to64: 796 return rewriteValueAMD64_OpZeroExt16to64(v, config) 797 case OpZeroExt32to64: 798 return rewriteValueAMD64_OpZeroExt32to64(v, config) 799 case OpZeroExt8to16: 800 return rewriteValueAMD64_OpZeroExt8to16(v, config) 801 case OpZeroExt8to32: 802 return rewriteValueAMD64_OpZeroExt8to32(v, config) 803 case OpZeroExt8to64: 804 return rewriteValueAMD64_OpZeroExt8to64(v, config) 805 } 806 return false 807 } 808 func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool { 809 b := v.Block 810 _ = b 811 // match: (ADDL x (MOVLconst [c])) 812 // cond: 813 // result: (ADDLconst [c] x) 814 for { 815 x := v.Args[0] 816 v_1 := v.Args[1] 817 if v_1.Op != OpAMD64MOVLconst { 818 break 819 } 820 c := v_1.AuxInt 821 v.reset(OpAMD64ADDLconst) 822 v.AuxInt = c 823 v.AddArg(x) 824 return true 825 } 826 // match: (ADDL (MOVLconst [c]) x) 827 // cond: 828 // result: (ADDLconst [c] x) 829 for { 830 v_0 := v.Args[0] 831 if v_0.Op != OpAMD64MOVLconst { 832 break 833 } 834 c := v_0.AuxInt 835 x := v.Args[1] 836 v.reset(OpAMD64ADDLconst) 837 v.AuxInt = c 838 v.AddArg(x) 839 return true 840 } 841 // match: (ADDL (SHLLconst x [c]) (SHRLconst x [32-c])) 842 // cond: 843 // result: (ROLLconst x [ c]) 844 for { 845 v_0 := v.Args[0] 846 if v_0.Op != OpAMD64SHLLconst { 847 break 848 } 849 c := v_0.AuxInt 850 x := v_0.Args[0] 851 v_1 := v.Args[1] 852 if v_1.Op != OpAMD64SHRLconst { 853 break 854 } 855 if v_1.AuxInt != 32-c { 856 break 857 } 858 if x != v_1.Args[0] { 859 break 860 } 861 v.reset(OpAMD64ROLLconst) 862 v.AuxInt = c 863 v.AddArg(x) 864 return true 865 } 866 // match: (ADDL (SHRLconst x [c]) (SHLLconst x [32-c])) 867 // cond: 868 // result: (ROLLconst x [32-c]) 869 for { 870 v_0 := v.Args[0] 871 if v_0.Op != OpAMD64SHRLconst { 872 break 873 } 874 c := v_0.AuxInt 875 x := v_0.Args[0] 876 v_1 := v.Args[1] 877 if v_1.Op != OpAMD64SHLLconst { 878 break 879 } 880 if v_1.AuxInt != 32-c { 881 break 882 } 883 if x != v_1.Args[0] { 884 break 885 } 886 v.reset(OpAMD64ROLLconst) 887 v.AuxInt = 32 - c 888 v.AddArg(x) 889 return true 890 } 891 // match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) 892 // cond: c < 16 && t.Size() == 2 893 // result: (ROLWconst x [ c]) 894 for { 895 t := v.Type 896 v_0 := v.Args[0] 897 if v_0.Op != OpAMD64SHLLconst { 898 break 899 } 900 c := v_0.AuxInt 901 x := v_0.Args[0] 902 v_1 := v.Args[1] 903 if v_1.Op != OpAMD64SHRWconst { 904 break 905 } 906 if v_1.AuxInt != 16-c { 907 break 908 } 909 if x != v_1.Args[0] { 910 break 911 } 912 if !(c < 16 && t.Size() == 2) { 913 break 914 } 915 v.reset(OpAMD64ROLWconst) 916 v.AuxInt = c 917 v.AddArg(x) 918 return true 919 } 920 // match: (ADDL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) 921 // cond: c > 0 && t.Size() == 2 922 // result: (ROLWconst x [16-c]) 923 for { 924 t := v.Type 925 v_0 := v.Args[0] 926 if v_0.Op != OpAMD64SHRWconst { 927 break 928 } 929 c := v_0.AuxInt 930 x := v_0.Args[0] 931 v_1 := v.Args[1] 932 if v_1.Op != OpAMD64SHLLconst { 933 break 934 } 935 if v_1.AuxInt != 16-c { 936 break 937 } 938 if x != v_1.Args[0] { 939 break 940 } 941 if !(c > 0 && t.Size() == 2) { 942 break 943 } 944 v.reset(OpAMD64ROLWconst) 945 v.AuxInt = 16 - c 946 v.AddArg(x) 947 return true 948 } 949 // match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) 950 // cond: c < 8 && t.Size() == 1 951 // result: (ROLBconst x [ c]) 952 for { 953 t := v.Type 954 v_0 := v.Args[0] 955 if v_0.Op != OpAMD64SHLLconst { 956 break 957 } 958 c := v_0.AuxInt 959 x := v_0.Args[0] 960 v_1 := v.Args[1] 961 if v_1.Op != OpAMD64SHRBconst { 962 break 963 } 964 if v_1.AuxInt != 8-c { 965 break 966 } 967 if x != v_1.Args[0] { 968 break 969 } 970 if !(c < 8 && t.Size() == 1) { 971 break 972 } 973 v.reset(OpAMD64ROLBconst) 974 v.AuxInt = c 975 v.AddArg(x) 976 return true 977 } 978 // match: (ADDL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) 979 // cond: c > 0 && t.Size() == 1 980 // result: (ROLBconst x [ 8-c]) 981 for { 982 t := v.Type 983 v_0 := v.Args[0] 984 if v_0.Op != OpAMD64SHRBconst { 985 break 986 } 987 c := v_0.AuxInt 988 x := v_0.Args[0] 989 v_1 := v.Args[1] 990 if v_1.Op != OpAMD64SHLLconst { 991 break 992 } 993 if v_1.AuxInt != 8-c { 994 break 995 } 996 if x != v_1.Args[0] { 997 break 998 } 999 if !(c > 0 && t.Size() == 1) { 1000 break 1001 } 1002 v.reset(OpAMD64ROLBconst) 1003 v.AuxInt = 8 - c 1004 v.AddArg(x) 1005 return true 1006 } 1007 // match: (ADDL x (NEGL y)) 1008 // cond: 1009 // result: (SUBL x y) 1010 for { 1011 x := v.Args[0] 1012 v_1 := v.Args[1] 1013 if v_1.Op != OpAMD64NEGL { 1014 break 1015 } 1016 y := v_1.Args[0] 1017 v.reset(OpAMD64SUBL) 1018 v.AddArg(x) 1019 v.AddArg(y) 1020 return true 1021 } 1022 return false 1023 } 1024 func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool { 1025 b := v.Block 1026 _ = b 1027 // match: (ADDLconst [c] x) 1028 // cond: int32(c)==0 1029 // result: x 1030 for { 1031 c := v.AuxInt 1032 x := v.Args[0] 1033 if !(int32(c) == 0) { 1034 break 1035 } 1036 v.reset(OpCopy) 1037 v.Type = x.Type 1038 v.AddArg(x) 1039 return true 1040 } 1041 // match: (ADDLconst [c] (MOVLconst [d])) 1042 // cond: 1043 // result: (MOVLconst [int64(int32(c+d))]) 1044 for { 1045 c := v.AuxInt 1046 v_0 := v.Args[0] 1047 if v_0.Op != OpAMD64MOVLconst { 1048 break 1049 } 1050 d := v_0.AuxInt 1051 v.reset(OpAMD64MOVLconst) 1052 v.AuxInt = int64(int32(c + d)) 1053 return true 1054 } 1055 // match: (ADDLconst [c] (ADDLconst [d] x)) 1056 // cond: 1057 // result: (ADDLconst [int64(int32(c+d))] x) 1058 for { 1059 c := v.AuxInt 1060 v_0 := v.Args[0] 1061 if v_0.Op != OpAMD64ADDLconst { 1062 break 1063 } 1064 d := v_0.AuxInt 1065 x := v_0.Args[0] 1066 v.reset(OpAMD64ADDLconst) 1067 v.AuxInt = int64(int32(c + d)) 1068 v.AddArg(x) 1069 return true 1070 } 1071 // match: (ADDLconst [c] (LEAL [d] {s} x)) 1072 // cond: is32Bit(c+d) 1073 // result: (LEAL [c+d] {s} x) 1074 for { 1075 c := v.AuxInt 1076 v_0 := v.Args[0] 1077 if v_0.Op != OpAMD64LEAL { 1078 break 1079 } 1080 d := v_0.AuxInt 1081 s := v_0.Aux 1082 x := v_0.Args[0] 1083 if !(is32Bit(c + d)) { 1084 break 1085 } 1086 v.reset(OpAMD64LEAL) 1087 v.AuxInt = c + d 1088 v.Aux = s 1089 v.AddArg(x) 1090 return true 1091 } 1092 return false 1093 } 1094 func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool { 1095 b := v.Block 1096 _ = b 1097 // match: (ADDQ x (MOVQconst [c])) 1098 // cond: is32Bit(c) 1099 // result: (ADDQconst [c] x) 1100 for { 1101 x := v.Args[0] 1102 v_1 := v.Args[1] 1103 if v_1.Op != OpAMD64MOVQconst { 1104 break 1105 } 1106 c := v_1.AuxInt 1107 if !(is32Bit(c)) { 1108 break 1109 } 1110 v.reset(OpAMD64ADDQconst) 1111 v.AuxInt = c 1112 v.AddArg(x) 1113 return true 1114 } 1115 // match: (ADDQ (MOVQconst [c]) x) 1116 // cond: is32Bit(c) 1117 // result: (ADDQconst [c] x) 1118 for { 1119 v_0 := v.Args[0] 1120 if v_0.Op != OpAMD64MOVQconst { 1121 break 1122 } 1123 c := v_0.AuxInt 1124 x := v.Args[1] 1125 if !(is32Bit(c)) { 1126 break 1127 } 1128 v.reset(OpAMD64ADDQconst) 1129 v.AuxInt = c 1130 v.AddArg(x) 1131 return true 1132 } 1133 // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [64-c])) 1134 // cond: 1135 // result: (ROLQconst x [ c]) 1136 for { 1137 v_0 := v.Args[0] 1138 if v_0.Op != OpAMD64SHLQconst { 1139 break 1140 } 1141 c := v_0.AuxInt 1142 x := v_0.Args[0] 1143 v_1 := v.Args[1] 1144 if v_1.Op != OpAMD64SHRQconst { 1145 break 1146 } 1147 if v_1.AuxInt != 64-c { 1148 break 1149 } 1150 if x != v_1.Args[0] { 1151 break 1152 } 1153 v.reset(OpAMD64ROLQconst) 1154 v.AuxInt = c 1155 v.AddArg(x) 1156 return true 1157 } 1158 // match: (ADDQ (SHRQconst x [c]) (SHLQconst x [64-c])) 1159 // cond: 1160 // result: (ROLQconst x [64-c]) 1161 for { 1162 v_0 := v.Args[0] 1163 if v_0.Op != OpAMD64SHRQconst { 1164 break 1165 } 1166 c := v_0.AuxInt 1167 x := v_0.Args[0] 1168 v_1 := v.Args[1] 1169 if v_1.Op != OpAMD64SHLQconst { 1170 break 1171 } 1172 if v_1.AuxInt != 64-c { 1173 break 1174 } 1175 if x != v_1.Args[0] { 1176 break 1177 } 1178 v.reset(OpAMD64ROLQconst) 1179 v.AuxInt = 64 - c 1180 v.AddArg(x) 1181 return true 1182 } 1183 // match: (ADDQ x (SHLQconst [3] y)) 1184 // cond: 1185 // result: (LEAQ8 x y) 1186 for { 1187 x := v.Args[0] 1188 v_1 := v.Args[1] 1189 if v_1.Op != OpAMD64SHLQconst { 1190 break 1191 } 1192 if v_1.AuxInt != 3 { 1193 break 1194 } 1195 y := v_1.Args[0] 1196 v.reset(OpAMD64LEAQ8) 1197 v.AddArg(x) 1198 v.AddArg(y) 1199 return true 1200 } 1201 // match: (ADDQ x (SHLQconst [2] y)) 1202 // cond: 1203 // result: (LEAQ4 x y) 1204 for { 1205 x := v.Args[0] 1206 v_1 := v.Args[1] 1207 if v_1.Op != OpAMD64SHLQconst { 1208 break 1209 } 1210 if v_1.AuxInt != 2 { 1211 break 1212 } 1213 y := v_1.Args[0] 1214 v.reset(OpAMD64LEAQ4) 1215 v.AddArg(x) 1216 v.AddArg(y) 1217 return true 1218 } 1219 // match: (ADDQ x (SHLQconst [1] y)) 1220 // cond: 1221 // result: (LEAQ2 x y) 1222 for { 1223 x := v.Args[0] 1224 v_1 := v.Args[1] 1225 if v_1.Op != OpAMD64SHLQconst { 1226 break 1227 } 1228 if v_1.AuxInt != 1 { 1229 break 1230 } 1231 y := v_1.Args[0] 1232 v.reset(OpAMD64LEAQ2) 1233 v.AddArg(x) 1234 v.AddArg(y) 1235 return true 1236 } 1237 // match: (ADDQ x (ADDQ y y)) 1238 // cond: 1239 // result: (LEAQ2 x y) 1240 for { 1241 x := v.Args[0] 1242 v_1 := v.Args[1] 1243 if v_1.Op != OpAMD64ADDQ { 1244 break 1245 } 1246 y := v_1.Args[0] 1247 if y != v_1.Args[1] { 1248 break 1249 } 1250 v.reset(OpAMD64LEAQ2) 1251 v.AddArg(x) 1252 v.AddArg(y) 1253 return true 1254 } 1255 // match: (ADDQ x (ADDQ x y)) 1256 // cond: 1257 // result: (LEAQ2 y x) 1258 for { 1259 x := v.Args[0] 1260 v_1 := v.Args[1] 1261 if v_1.Op != OpAMD64ADDQ { 1262 break 1263 } 1264 if x != v_1.Args[0] { 1265 break 1266 } 1267 y := v_1.Args[1] 1268 v.reset(OpAMD64LEAQ2) 1269 v.AddArg(y) 1270 v.AddArg(x) 1271 return true 1272 } 1273 // match: (ADDQ x (ADDQ y x)) 1274 // cond: 1275 // result: (LEAQ2 y x) 1276 for { 1277 x := v.Args[0] 1278 v_1 := v.Args[1] 1279 if v_1.Op != OpAMD64ADDQ { 1280 break 1281 } 1282 y := v_1.Args[0] 1283 if x != v_1.Args[1] { 1284 break 1285 } 1286 v.reset(OpAMD64LEAQ2) 1287 v.AddArg(y) 1288 v.AddArg(x) 1289 return true 1290 } 1291 // match: (ADDQ (ADDQconst [c] x) y) 1292 // cond: 1293 // result: (LEAQ1 [c] x y) 1294 for { 1295 v_0 := v.Args[0] 1296 if v_0.Op != OpAMD64ADDQconst { 1297 break 1298 } 1299 c := v_0.AuxInt 1300 x := v_0.Args[0] 1301 y := v.Args[1] 1302 v.reset(OpAMD64LEAQ1) 1303 v.AuxInt = c 1304 v.AddArg(x) 1305 v.AddArg(y) 1306 return true 1307 } 1308 // match: (ADDQ x (ADDQconst [c] y)) 1309 // cond: 1310 // result: (LEAQ1 [c] x y) 1311 for { 1312 x := v.Args[0] 1313 v_1 := v.Args[1] 1314 if v_1.Op != OpAMD64ADDQconst { 1315 break 1316 } 1317 c := v_1.AuxInt 1318 y := v_1.Args[0] 1319 v.reset(OpAMD64LEAQ1) 1320 v.AuxInt = c 1321 v.AddArg(x) 1322 v.AddArg(y) 1323 return true 1324 } 1325 // match: (ADDQ x (LEAQ [c] {s} y)) 1326 // cond: x.Op != OpSB && y.Op != OpSB 1327 // result: (LEAQ1 [c] {s} x y) 1328 for { 1329 x := v.Args[0] 1330 v_1 := v.Args[1] 1331 if v_1.Op != OpAMD64LEAQ { 1332 break 1333 } 1334 c := v_1.AuxInt 1335 s := v_1.Aux 1336 y := v_1.Args[0] 1337 if !(x.Op != OpSB && y.Op != OpSB) { 1338 break 1339 } 1340 v.reset(OpAMD64LEAQ1) 1341 v.AuxInt = c 1342 v.Aux = s 1343 v.AddArg(x) 1344 v.AddArg(y) 1345 return true 1346 } 1347 // match: (ADDQ (LEAQ [c] {s} x) y) 1348 // cond: x.Op != OpSB && y.Op != OpSB 1349 // result: (LEAQ1 [c] {s} x y) 1350 for { 1351 v_0 := v.Args[0] 1352 if v_0.Op != OpAMD64LEAQ { 1353 break 1354 } 1355 c := v_0.AuxInt 1356 s := v_0.Aux 1357 x := v_0.Args[0] 1358 y := v.Args[1] 1359 if !(x.Op != OpSB && y.Op != OpSB) { 1360 break 1361 } 1362 v.reset(OpAMD64LEAQ1) 1363 v.AuxInt = c 1364 v.Aux = s 1365 v.AddArg(x) 1366 v.AddArg(y) 1367 return true 1368 } 1369 // match: (ADDQ x (NEGQ y)) 1370 // cond: 1371 // result: (SUBQ x y) 1372 for { 1373 x := v.Args[0] 1374 v_1 := v.Args[1] 1375 if v_1.Op != OpAMD64NEGQ { 1376 break 1377 } 1378 y := v_1.Args[0] 1379 v.reset(OpAMD64SUBQ) 1380 v.AddArg(x) 1381 v.AddArg(y) 1382 return true 1383 } 1384 return false 1385 } 1386 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool { 1387 b := v.Block 1388 _ = b 1389 // match: (ADDQconst [c] (ADDQ x y)) 1390 // cond: 1391 // result: (LEAQ1 [c] x y) 1392 for { 1393 c := v.AuxInt 1394 v_0 := v.Args[0] 1395 if v_0.Op != OpAMD64ADDQ { 1396 break 1397 } 1398 x := v_0.Args[0] 1399 y := v_0.Args[1] 1400 v.reset(OpAMD64LEAQ1) 1401 v.AuxInt = c 1402 v.AddArg(x) 1403 v.AddArg(y) 1404 return true 1405 } 1406 // match: (ADDQconst [c] (LEAQ [d] {s} x)) 1407 // cond: is32Bit(c+d) 1408 // result: (LEAQ [c+d] {s} x) 1409 for { 1410 c := v.AuxInt 1411 v_0 := v.Args[0] 1412 if v_0.Op != OpAMD64LEAQ { 1413 break 1414 } 1415 d := v_0.AuxInt 1416 s := v_0.Aux 1417 x := v_0.Args[0] 1418 if !(is32Bit(c + d)) { 1419 break 1420 } 1421 v.reset(OpAMD64LEAQ) 1422 v.AuxInt = c + d 1423 v.Aux = s 1424 v.AddArg(x) 1425 return true 1426 } 1427 // match: (ADDQconst [c] (LEAQ1 [d] {s} x y)) 1428 // cond: is32Bit(c+d) 1429 // result: (LEAQ1 [c+d] {s} x y) 1430 for { 1431 c := v.AuxInt 1432 v_0 := v.Args[0] 1433 if v_0.Op != OpAMD64LEAQ1 { 1434 break 1435 } 1436 d := v_0.AuxInt 1437 s := v_0.Aux 1438 x := v_0.Args[0] 1439 y := v_0.Args[1] 1440 if !(is32Bit(c + d)) { 1441 break 1442 } 1443 v.reset(OpAMD64LEAQ1) 1444 v.AuxInt = c + d 1445 v.Aux = s 1446 v.AddArg(x) 1447 v.AddArg(y) 1448 return true 1449 } 1450 // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) 1451 // cond: is32Bit(c+d) 1452 // result: (LEAQ2 [c+d] {s} x y) 1453 for { 1454 c := v.AuxInt 1455 v_0 := v.Args[0] 1456 if v_0.Op != OpAMD64LEAQ2 { 1457 break 1458 } 1459 d := v_0.AuxInt 1460 s := v_0.Aux 1461 x := v_0.Args[0] 1462 y := v_0.Args[1] 1463 if !(is32Bit(c + d)) { 1464 break 1465 } 1466 v.reset(OpAMD64LEAQ2) 1467 v.AuxInt = c + d 1468 v.Aux = s 1469 v.AddArg(x) 1470 v.AddArg(y) 1471 return true 1472 } 1473 // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) 1474 // cond: is32Bit(c+d) 1475 // result: (LEAQ4 [c+d] {s} x y) 1476 for { 1477 c := v.AuxInt 1478 v_0 := v.Args[0] 1479 if v_0.Op != OpAMD64LEAQ4 { 1480 break 1481 } 1482 d := v_0.AuxInt 1483 s := v_0.Aux 1484 x := v_0.Args[0] 1485 y := v_0.Args[1] 1486 if !(is32Bit(c + d)) { 1487 break 1488 } 1489 v.reset(OpAMD64LEAQ4) 1490 v.AuxInt = c + d 1491 v.Aux = s 1492 v.AddArg(x) 1493 v.AddArg(y) 1494 return true 1495 } 1496 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) 1497 // cond: is32Bit(c+d) 1498 // result: (LEAQ8 [c+d] {s} x y) 1499 for { 1500 c := v.AuxInt 1501 v_0 := v.Args[0] 1502 if v_0.Op != OpAMD64LEAQ8 { 1503 break 1504 } 1505 d := v_0.AuxInt 1506 s := v_0.Aux 1507 x := v_0.Args[0] 1508 y := v_0.Args[1] 1509 if !(is32Bit(c + d)) { 1510 break 1511 } 1512 v.reset(OpAMD64LEAQ8) 1513 v.AuxInt = c + d 1514 v.Aux = s 1515 v.AddArg(x) 1516 v.AddArg(y) 1517 return true 1518 } 1519 // match: (ADDQconst [0] x) 1520 // cond: 1521 // result: x 1522 for { 1523 if v.AuxInt != 0 { 1524 break 1525 } 1526 x := v.Args[0] 1527 v.reset(OpCopy) 1528 v.Type = x.Type 1529 v.AddArg(x) 1530 return true 1531 } 1532 // match: (ADDQconst [c] (MOVQconst [d])) 1533 // cond: 1534 // result: (MOVQconst [c+d]) 1535 for { 1536 c := v.AuxInt 1537 v_0 := v.Args[0] 1538 if v_0.Op != OpAMD64MOVQconst { 1539 break 1540 } 1541 d := v_0.AuxInt 1542 v.reset(OpAMD64MOVQconst) 1543 v.AuxInt = c + d 1544 return true 1545 } 1546 // match: (ADDQconst [c] (ADDQconst [d] x)) 1547 // cond: is32Bit(c+d) 1548 // result: (ADDQconst [c+d] x) 1549 for { 1550 c := v.AuxInt 1551 v_0 := v.Args[0] 1552 if v_0.Op != OpAMD64ADDQconst { 1553 break 1554 } 1555 d := v_0.AuxInt 1556 x := v_0.Args[0] 1557 if !(is32Bit(c + d)) { 1558 break 1559 } 1560 v.reset(OpAMD64ADDQconst) 1561 v.AuxInt = c + d 1562 v.AddArg(x) 1563 return true 1564 } 1565 return false 1566 } 1567 func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool { 1568 b := v.Block 1569 _ = b 1570 // match: (ANDL x (MOVLconst [c])) 1571 // cond: 1572 // result: (ANDLconst [c] x) 1573 for { 1574 x := v.Args[0] 1575 v_1 := v.Args[1] 1576 if v_1.Op != OpAMD64MOVLconst { 1577 break 1578 } 1579 c := v_1.AuxInt 1580 v.reset(OpAMD64ANDLconst) 1581 v.AuxInt = c 1582 v.AddArg(x) 1583 return true 1584 } 1585 // match: (ANDL (MOVLconst [c]) x) 1586 // cond: 1587 // result: (ANDLconst [c] x) 1588 for { 1589 v_0 := v.Args[0] 1590 if v_0.Op != OpAMD64MOVLconst { 1591 break 1592 } 1593 c := v_0.AuxInt 1594 x := v.Args[1] 1595 v.reset(OpAMD64ANDLconst) 1596 v.AuxInt = c 1597 v.AddArg(x) 1598 return true 1599 } 1600 // match: (ANDL x x) 1601 // cond: 1602 // result: x 1603 for { 1604 x := v.Args[0] 1605 if x != v.Args[1] { 1606 break 1607 } 1608 v.reset(OpCopy) 1609 v.Type = x.Type 1610 v.AddArg(x) 1611 return true 1612 } 1613 return false 1614 } 1615 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool { 1616 b := v.Block 1617 _ = b 1618 // match: (ANDLconst [c] (ANDLconst [d] x)) 1619 // cond: 1620 // result: (ANDLconst [c & d] x) 1621 for { 1622 c := v.AuxInt 1623 v_0 := v.Args[0] 1624 if v_0.Op != OpAMD64ANDLconst { 1625 break 1626 } 1627 d := v_0.AuxInt 1628 x := v_0.Args[0] 1629 v.reset(OpAMD64ANDLconst) 1630 v.AuxInt = c & d 1631 v.AddArg(x) 1632 return true 1633 } 1634 // match: (ANDLconst [0xFF] x) 1635 // cond: 1636 // result: (MOVBQZX x) 1637 for { 1638 if v.AuxInt != 0xFF { 1639 break 1640 } 1641 x := v.Args[0] 1642 v.reset(OpAMD64MOVBQZX) 1643 v.AddArg(x) 1644 return true 1645 } 1646 // match: (ANDLconst [0xFFFF] x) 1647 // cond: 1648 // result: (MOVWQZX x) 1649 for { 1650 if v.AuxInt != 0xFFFF { 1651 break 1652 } 1653 x := v.Args[0] 1654 v.reset(OpAMD64MOVWQZX) 1655 v.AddArg(x) 1656 return true 1657 } 1658 // match: (ANDLconst [c] _) 1659 // cond: int32(c)==0 1660 // result: (MOVLconst [0]) 1661 for { 1662 c := v.AuxInt 1663 if !(int32(c) == 0) { 1664 break 1665 } 1666 v.reset(OpAMD64MOVLconst) 1667 v.AuxInt = 0 1668 return true 1669 } 1670 // match: (ANDLconst [c] x) 1671 // cond: int32(c)==-1 1672 // result: x 1673 for { 1674 c := v.AuxInt 1675 x := v.Args[0] 1676 if !(int32(c) == -1) { 1677 break 1678 } 1679 v.reset(OpCopy) 1680 v.Type = x.Type 1681 v.AddArg(x) 1682 return true 1683 } 1684 // match: (ANDLconst [c] (MOVLconst [d])) 1685 // cond: 1686 // result: (MOVLconst [c&d]) 1687 for { 1688 c := v.AuxInt 1689 v_0 := v.Args[0] 1690 if v_0.Op != OpAMD64MOVLconst { 1691 break 1692 } 1693 d := v_0.AuxInt 1694 v.reset(OpAMD64MOVLconst) 1695 v.AuxInt = c & d 1696 return true 1697 } 1698 return false 1699 } 1700 func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool { 1701 b := v.Block 1702 _ = b 1703 // match: (ANDQ x (MOVQconst [c])) 1704 // cond: is32Bit(c) 1705 // result: (ANDQconst [c] x) 1706 for { 1707 x := v.Args[0] 1708 v_1 := v.Args[1] 1709 if v_1.Op != OpAMD64MOVQconst { 1710 break 1711 } 1712 c := v_1.AuxInt 1713 if !(is32Bit(c)) { 1714 break 1715 } 1716 v.reset(OpAMD64ANDQconst) 1717 v.AuxInt = c 1718 v.AddArg(x) 1719 return true 1720 } 1721 // match: (ANDQ (MOVQconst [c]) x) 1722 // cond: is32Bit(c) 1723 // result: (ANDQconst [c] x) 1724 for { 1725 v_0 := v.Args[0] 1726 if v_0.Op != OpAMD64MOVQconst { 1727 break 1728 } 1729 c := v_0.AuxInt 1730 x := v.Args[1] 1731 if !(is32Bit(c)) { 1732 break 1733 } 1734 v.reset(OpAMD64ANDQconst) 1735 v.AuxInt = c 1736 v.AddArg(x) 1737 return true 1738 } 1739 // match: (ANDQ x x) 1740 // cond: 1741 // result: x 1742 for { 1743 x := v.Args[0] 1744 if x != v.Args[1] { 1745 break 1746 } 1747 v.reset(OpCopy) 1748 v.Type = x.Type 1749 v.AddArg(x) 1750 return true 1751 } 1752 return false 1753 } 1754 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { 1755 b := v.Block 1756 _ = b 1757 // match: (ANDQconst [c] (ANDQconst [d] x)) 1758 // cond: 1759 // result: (ANDQconst [c & d] x) 1760 for { 1761 c := v.AuxInt 1762 v_0 := v.Args[0] 1763 if v_0.Op != OpAMD64ANDQconst { 1764 break 1765 } 1766 d := v_0.AuxInt 1767 x := v_0.Args[0] 1768 v.reset(OpAMD64ANDQconst) 1769 v.AuxInt = c & d 1770 v.AddArg(x) 1771 return true 1772 } 1773 // match: (ANDQconst [0xFF] x) 1774 // cond: 1775 // result: (MOVBQZX x) 1776 for { 1777 if v.AuxInt != 0xFF { 1778 break 1779 } 1780 x := v.Args[0] 1781 v.reset(OpAMD64MOVBQZX) 1782 v.AddArg(x) 1783 return true 1784 } 1785 // match: (ANDQconst [0xFFFF] x) 1786 // cond: 1787 // result: (MOVWQZX x) 1788 for { 1789 if v.AuxInt != 0xFFFF { 1790 break 1791 } 1792 x := v.Args[0] 1793 v.reset(OpAMD64MOVWQZX) 1794 v.AddArg(x) 1795 return true 1796 } 1797 // match: (ANDQconst [0xFFFFFFFF] x) 1798 // cond: 1799 // result: (MOVLQZX x) 1800 for { 1801 if v.AuxInt != 0xFFFFFFFF { 1802 break 1803 } 1804 x := v.Args[0] 1805 v.reset(OpAMD64MOVLQZX) 1806 v.AddArg(x) 1807 return true 1808 } 1809 // match: (ANDQconst [0] _) 1810 // cond: 1811 // result: (MOVQconst [0]) 1812 for { 1813 if v.AuxInt != 0 { 1814 break 1815 } 1816 v.reset(OpAMD64MOVQconst) 1817 v.AuxInt = 0 1818 return true 1819 } 1820 // match: (ANDQconst [-1] x) 1821 // cond: 1822 // result: x 1823 for { 1824 if v.AuxInt != -1 { 1825 break 1826 } 1827 x := v.Args[0] 1828 v.reset(OpCopy) 1829 v.Type = x.Type 1830 v.AddArg(x) 1831 return true 1832 } 1833 // match: (ANDQconst [c] (MOVQconst [d])) 1834 // cond: 1835 // result: (MOVQconst [c&d]) 1836 for { 1837 c := v.AuxInt 1838 v_0 := v.Args[0] 1839 if v_0.Op != OpAMD64MOVQconst { 1840 break 1841 } 1842 d := v_0.AuxInt 1843 v.reset(OpAMD64MOVQconst) 1844 v.AuxInt = c & d 1845 return true 1846 } 1847 return false 1848 } 1849 func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { 1850 b := v.Block 1851 _ = b 1852 // match: (CMPB x (MOVLconst [c])) 1853 // cond: 1854 // result: (CMPBconst x [int64(int8(c))]) 1855 for { 1856 x := v.Args[0] 1857 v_1 := v.Args[1] 1858 if v_1.Op != OpAMD64MOVLconst { 1859 break 1860 } 1861 c := v_1.AuxInt 1862 v.reset(OpAMD64CMPBconst) 1863 v.AuxInt = int64(int8(c)) 1864 v.AddArg(x) 1865 return true 1866 } 1867 // match: (CMPB (MOVLconst [c]) x) 1868 // cond: 1869 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 1870 for { 1871 v_0 := v.Args[0] 1872 if v_0.Op != OpAMD64MOVLconst { 1873 break 1874 } 1875 c := v_0.AuxInt 1876 x := v.Args[1] 1877 v.reset(OpAMD64InvertFlags) 1878 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 1879 v0.AuxInt = int64(int8(c)) 1880 v0.AddArg(x) 1881 v.AddArg(v0) 1882 return true 1883 } 1884 return false 1885 } 1886 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { 1887 b := v.Block 1888 _ = b 1889 // match: (CMPBconst (MOVLconst [x]) [y]) 1890 // cond: int8(x)==int8(y) 1891 // result: (FlagEQ) 1892 for { 1893 y := v.AuxInt 1894 v_0 := v.Args[0] 1895 if v_0.Op != OpAMD64MOVLconst { 1896 break 1897 } 1898 x := v_0.AuxInt 1899 if !(int8(x) == int8(y)) { 1900 break 1901 } 1902 v.reset(OpAMD64FlagEQ) 1903 return true 1904 } 1905 // match: (CMPBconst (MOVLconst [x]) [y]) 1906 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 1907 // result: (FlagLT_ULT) 1908 for { 1909 y := v.AuxInt 1910 v_0 := v.Args[0] 1911 if v_0.Op != OpAMD64MOVLconst { 1912 break 1913 } 1914 x := v_0.AuxInt 1915 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 1916 break 1917 } 1918 v.reset(OpAMD64FlagLT_ULT) 1919 return true 1920 } 1921 // match: (CMPBconst (MOVLconst [x]) [y]) 1922 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 1923 // result: (FlagLT_UGT) 1924 for { 1925 y := v.AuxInt 1926 v_0 := v.Args[0] 1927 if v_0.Op != OpAMD64MOVLconst { 1928 break 1929 } 1930 x := v_0.AuxInt 1931 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 1932 break 1933 } 1934 v.reset(OpAMD64FlagLT_UGT) 1935 return true 1936 } 1937 // match: (CMPBconst (MOVLconst [x]) [y]) 1938 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 1939 // result: (FlagGT_ULT) 1940 for { 1941 y := v.AuxInt 1942 v_0 := v.Args[0] 1943 if v_0.Op != OpAMD64MOVLconst { 1944 break 1945 } 1946 x := v_0.AuxInt 1947 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 1948 break 1949 } 1950 v.reset(OpAMD64FlagGT_ULT) 1951 return true 1952 } 1953 // match: (CMPBconst (MOVLconst [x]) [y]) 1954 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 1955 // result: (FlagGT_UGT) 1956 for { 1957 y := v.AuxInt 1958 v_0 := v.Args[0] 1959 if v_0.Op != OpAMD64MOVLconst { 1960 break 1961 } 1962 x := v_0.AuxInt 1963 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 1964 break 1965 } 1966 v.reset(OpAMD64FlagGT_UGT) 1967 return true 1968 } 1969 // match: (CMPBconst (ANDLconst _ [m]) [n]) 1970 // cond: 0 <= int8(m) && int8(m) < int8(n) 1971 // result: (FlagLT_ULT) 1972 for { 1973 n := v.AuxInt 1974 v_0 := v.Args[0] 1975 if v_0.Op != OpAMD64ANDLconst { 1976 break 1977 } 1978 m := v_0.AuxInt 1979 if !(0 <= int8(m) && int8(m) < int8(n)) { 1980 break 1981 } 1982 v.reset(OpAMD64FlagLT_ULT) 1983 return true 1984 } 1985 // match: (CMPBconst (ANDL x y) [0]) 1986 // cond: 1987 // result: (TESTB x y) 1988 for { 1989 if v.AuxInt != 0 { 1990 break 1991 } 1992 v_0 := v.Args[0] 1993 if v_0.Op != OpAMD64ANDL { 1994 break 1995 } 1996 x := v_0.Args[0] 1997 y := v_0.Args[1] 1998 v.reset(OpAMD64TESTB) 1999 v.AddArg(x) 2000 v.AddArg(y) 2001 return true 2002 } 2003 // match: (CMPBconst (ANDLconst [c] x) [0]) 2004 // cond: 2005 // result: (TESTBconst [int64(int8(c))] x) 2006 for { 2007 if v.AuxInt != 0 { 2008 break 2009 } 2010 v_0 := v.Args[0] 2011 if v_0.Op != OpAMD64ANDLconst { 2012 break 2013 } 2014 c := v_0.AuxInt 2015 x := v_0.Args[0] 2016 v.reset(OpAMD64TESTBconst) 2017 v.AuxInt = int64(int8(c)) 2018 v.AddArg(x) 2019 return true 2020 } 2021 // match: (CMPBconst x [0]) 2022 // cond: 2023 // result: (TESTB x x) 2024 for { 2025 if v.AuxInt != 0 { 2026 break 2027 } 2028 x := v.Args[0] 2029 v.reset(OpAMD64TESTB) 2030 v.AddArg(x) 2031 v.AddArg(x) 2032 return true 2033 } 2034 return false 2035 } 2036 func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool { 2037 b := v.Block 2038 _ = b 2039 // match: (CMPL x (MOVLconst [c])) 2040 // cond: 2041 // result: (CMPLconst x [c]) 2042 for { 2043 x := v.Args[0] 2044 v_1 := v.Args[1] 2045 if v_1.Op != OpAMD64MOVLconst { 2046 break 2047 } 2048 c := v_1.AuxInt 2049 v.reset(OpAMD64CMPLconst) 2050 v.AuxInt = c 2051 v.AddArg(x) 2052 return true 2053 } 2054 // match: (CMPL (MOVLconst [c]) x) 2055 // cond: 2056 // result: (InvertFlags (CMPLconst x [c])) 2057 for { 2058 v_0 := v.Args[0] 2059 if v_0.Op != OpAMD64MOVLconst { 2060 break 2061 } 2062 c := v_0.AuxInt 2063 x := v.Args[1] 2064 v.reset(OpAMD64InvertFlags) 2065 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 2066 v0.AuxInt = c 2067 v0.AddArg(x) 2068 v.AddArg(v0) 2069 return true 2070 } 2071 return false 2072 } 2073 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool { 2074 b := v.Block 2075 _ = b 2076 // match: (CMPLconst (MOVLconst [x]) [y]) 2077 // cond: int32(x)==int32(y) 2078 // result: (FlagEQ) 2079 for { 2080 y := v.AuxInt 2081 v_0 := v.Args[0] 2082 if v_0.Op != OpAMD64MOVLconst { 2083 break 2084 } 2085 x := v_0.AuxInt 2086 if !(int32(x) == int32(y)) { 2087 break 2088 } 2089 v.reset(OpAMD64FlagEQ) 2090 return true 2091 } 2092 // match: (CMPLconst (MOVLconst [x]) [y]) 2093 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 2094 // result: (FlagLT_ULT) 2095 for { 2096 y := v.AuxInt 2097 v_0 := v.Args[0] 2098 if v_0.Op != OpAMD64MOVLconst { 2099 break 2100 } 2101 x := v_0.AuxInt 2102 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 2103 break 2104 } 2105 v.reset(OpAMD64FlagLT_ULT) 2106 return true 2107 } 2108 // match: (CMPLconst (MOVLconst [x]) [y]) 2109 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 2110 // result: (FlagLT_UGT) 2111 for { 2112 y := v.AuxInt 2113 v_0 := v.Args[0] 2114 if v_0.Op != OpAMD64MOVLconst { 2115 break 2116 } 2117 x := v_0.AuxInt 2118 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 2119 break 2120 } 2121 v.reset(OpAMD64FlagLT_UGT) 2122 return true 2123 } 2124 // match: (CMPLconst (MOVLconst [x]) [y]) 2125 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 2126 // result: (FlagGT_ULT) 2127 for { 2128 y := v.AuxInt 2129 v_0 := v.Args[0] 2130 if v_0.Op != OpAMD64MOVLconst { 2131 break 2132 } 2133 x := v_0.AuxInt 2134 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 2135 break 2136 } 2137 v.reset(OpAMD64FlagGT_ULT) 2138 return true 2139 } 2140 // match: (CMPLconst (MOVLconst [x]) [y]) 2141 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 2142 // result: (FlagGT_UGT) 2143 for { 2144 y := v.AuxInt 2145 v_0 := v.Args[0] 2146 if v_0.Op != OpAMD64MOVLconst { 2147 break 2148 } 2149 x := v_0.AuxInt 2150 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 2151 break 2152 } 2153 v.reset(OpAMD64FlagGT_UGT) 2154 return true 2155 } 2156 // match: (CMPLconst (SHRLconst _ [c]) [n]) 2157 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 2158 // result: (FlagLT_ULT) 2159 for { 2160 n := v.AuxInt 2161 v_0 := v.Args[0] 2162 if v_0.Op != OpAMD64SHRLconst { 2163 break 2164 } 2165 c := v_0.AuxInt 2166 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 2167 break 2168 } 2169 v.reset(OpAMD64FlagLT_ULT) 2170 return true 2171 } 2172 // match: (CMPLconst (ANDLconst _ [m]) [n]) 2173 // cond: 0 <= int32(m) && int32(m) < int32(n) 2174 // result: (FlagLT_ULT) 2175 for { 2176 n := v.AuxInt 2177 v_0 := v.Args[0] 2178 if v_0.Op != OpAMD64ANDLconst { 2179 break 2180 } 2181 m := v_0.AuxInt 2182 if !(0 <= int32(m) && int32(m) < int32(n)) { 2183 break 2184 } 2185 v.reset(OpAMD64FlagLT_ULT) 2186 return true 2187 } 2188 // match: (CMPLconst (ANDL x y) [0]) 2189 // cond: 2190 // result: (TESTL x y) 2191 for { 2192 if v.AuxInt != 0 { 2193 break 2194 } 2195 v_0 := v.Args[0] 2196 if v_0.Op != OpAMD64ANDL { 2197 break 2198 } 2199 x := v_0.Args[0] 2200 y := v_0.Args[1] 2201 v.reset(OpAMD64TESTL) 2202 v.AddArg(x) 2203 v.AddArg(y) 2204 return true 2205 } 2206 // match: (CMPLconst (ANDLconst [c] x) [0]) 2207 // cond: 2208 // result: (TESTLconst [c] x) 2209 for { 2210 if v.AuxInt != 0 { 2211 break 2212 } 2213 v_0 := v.Args[0] 2214 if v_0.Op != OpAMD64ANDLconst { 2215 break 2216 } 2217 c := v_0.AuxInt 2218 x := v_0.Args[0] 2219 v.reset(OpAMD64TESTLconst) 2220 v.AuxInt = c 2221 v.AddArg(x) 2222 return true 2223 } 2224 // match: (CMPLconst x [0]) 2225 // cond: 2226 // result: (TESTL x x) 2227 for { 2228 if v.AuxInt != 0 { 2229 break 2230 } 2231 x := v.Args[0] 2232 v.reset(OpAMD64TESTL) 2233 v.AddArg(x) 2234 v.AddArg(x) 2235 return true 2236 } 2237 return false 2238 } 2239 func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool { 2240 b := v.Block 2241 _ = b 2242 // match: (CMPQ x (MOVQconst [c])) 2243 // cond: is32Bit(c) 2244 // result: (CMPQconst x [c]) 2245 for { 2246 x := v.Args[0] 2247 v_1 := v.Args[1] 2248 if v_1.Op != OpAMD64MOVQconst { 2249 break 2250 } 2251 c := v_1.AuxInt 2252 if !(is32Bit(c)) { 2253 break 2254 } 2255 v.reset(OpAMD64CMPQconst) 2256 v.AuxInt = c 2257 v.AddArg(x) 2258 return true 2259 } 2260 // match: (CMPQ (MOVQconst [c]) x) 2261 // cond: is32Bit(c) 2262 // result: (InvertFlags (CMPQconst x [c])) 2263 for { 2264 v_0 := v.Args[0] 2265 if v_0.Op != OpAMD64MOVQconst { 2266 break 2267 } 2268 c := v_0.AuxInt 2269 x := v.Args[1] 2270 if !(is32Bit(c)) { 2271 break 2272 } 2273 v.reset(OpAMD64InvertFlags) 2274 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 2275 v0.AuxInt = c 2276 v0.AddArg(x) 2277 v.AddArg(v0) 2278 return true 2279 } 2280 return false 2281 } 2282 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { 2283 b := v.Block 2284 _ = b 2285 // match: (CMPQconst (MOVQconst [x]) [y]) 2286 // cond: x==y 2287 // result: (FlagEQ) 2288 for { 2289 y := v.AuxInt 2290 v_0 := v.Args[0] 2291 if v_0.Op != OpAMD64MOVQconst { 2292 break 2293 } 2294 x := v_0.AuxInt 2295 if !(x == y) { 2296 break 2297 } 2298 v.reset(OpAMD64FlagEQ) 2299 return true 2300 } 2301 // match: (CMPQconst (MOVQconst [x]) [y]) 2302 // cond: x<y && uint64(x)<uint64(y) 2303 // result: (FlagLT_ULT) 2304 for { 2305 y := v.AuxInt 2306 v_0 := v.Args[0] 2307 if v_0.Op != OpAMD64MOVQconst { 2308 break 2309 } 2310 x := v_0.AuxInt 2311 if !(x < y && uint64(x) < uint64(y)) { 2312 break 2313 } 2314 v.reset(OpAMD64FlagLT_ULT) 2315 return true 2316 } 2317 // match: (CMPQconst (MOVQconst [x]) [y]) 2318 // cond: x<y && uint64(x)>uint64(y) 2319 // result: (FlagLT_UGT) 2320 for { 2321 y := v.AuxInt 2322 v_0 := v.Args[0] 2323 if v_0.Op != OpAMD64MOVQconst { 2324 break 2325 } 2326 x := v_0.AuxInt 2327 if !(x < y && uint64(x) > uint64(y)) { 2328 break 2329 } 2330 v.reset(OpAMD64FlagLT_UGT) 2331 return true 2332 } 2333 // match: (CMPQconst (MOVQconst [x]) [y]) 2334 // cond: x>y && uint64(x)<uint64(y) 2335 // result: (FlagGT_ULT) 2336 for { 2337 y := v.AuxInt 2338 v_0 := v.Args[0] 2339 if v_0.Op != OpAMD64MOVQconst { 2340 break 2341 } 2342 x := v_0.AuxInt 2343 if !(x > y && uint64(x) < uint64(y)) { 2344 break 2345 } 2346 v.reset(OpAMD64FlagGT_ULT) 2347 return true 2348 } 2349 // match: (CMPQconst (MOVQconst [x]) [y]) 2350 // cond: x>y && uint64(x)>uint64(y) 2351 // result: (FlagGT_UGT) 2352 for { 2353 y := v.AuxInt 2354 v_0 := v.Args[0] 2355 if v_0.Op != OpAMD64MOVQconst { 2356 break 2357 } 2358 x := v_0.AuxInt 2359 if !(x > y && uint64(x) > uint64(y)) { 2360 break 2361 } 2362 v.reset(OpAMD64FlagGT_UGT) 2363 return true 2364 } 2365 // match: (CMPQconst (MOVBQZX _) [c]) 2366 // cond: 0xFF < c 2367 // result: (FlagLT_ULT) 2368 for { 2369 c := v.AuxInt 2370 v_0 := v.Args[0] 2371 if v_0.Op != OpAMD64MOVBQZX { 2372 break 2373 } 2374 if !(0xFF < c) { 2375 break 2376 } 2377 v.reset(OpAMD64FlagLT_ULT) 2378 return true 2379 } 2380 // match: (CMPQconst (MOVWQZX _) [c]) 2381 // cond: 0xFFFF < c 2382 // result: (FlagLT_ULT) 2383 for { 2384 c := v.AuxInt 2385 v_0 := v.Args[0] 2386 if v_0.Op != OpAMD64MOVWQZX { 2387 break 2388 } 2389 if !(0xFFFF < c) { 2390 break 2391 } 2392 v.reset(OpAMD64FlagLT_ULT) 2393 return true 2394 } 2395 // match: (CMPQconst (MOVLQZX _) [c]) 2396 // cond: 0xFFFFFFFF < c 2397 // result: (FlagLT_ULT) 2398 for { 2399 c := v.AuxInt 2400 v_0 := v.Args[0] 2401 if v_0.Op != OpAMD64MOVLQZX { 2402 break 2403 } 2404 if !(0xFFFFFFFF < c) { 2405 break 2406 } 2407 v.reset(OpAMD64FlagLT_ULT) 2408 return true 2409 } 2410 // match: (CMPQconst (SHRQconst _ [c]) [n]) 2411 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 2412 // result: (FlagLT_ULT) 2413 for { 2414 n := v.AuxInt 2415 v_0 := v.Args[0] 2416 if v_0.Op != OpAMD64SHRQconst { 2417 break 2418 } 2419 c := v_0.AuxInt 2420 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 2421 break 2422 } 2423 v.reset(OpAMD64FlagLT_ULT) 2424 return true 2425 } 2426 // match: (CMPQconst (ANDQconst _ [m]) [n]) 2427 // cond: 0 <= m && m < n 2428 // result: (FlagLT_ULT) 2429 for { 2430 n := v.AuxInt 2431 v_0 := v.Args[0] 2432 if v_0.Op != OpAMD64ANDQconst { 2433 break 2434 } 2435 m := v_0.AuxInt 2436 if !(0 <= m && m < n) { 2437 break 2438 } 2439 v.reset(OpAMD64FlagLT_ULT) 2440 return true 2441 } 2442 // match: (CMPQconst (ANDQ x y) [0]) 2443 // cond: 2444 // result: (TESTQ x y) 2445 for { 2446 if v.AuxInt != 0 { 2447 break 2448 } 2449 v_0 := v.Args[0] 2450 if v_0.Op != OpAMD64ANDQ { 2451 break 2452 } 2453 x := v_0.Args[0] 2454 y := v_0.Args[1] 2455 v.reset(OpAMD64TESTQ) 2456 v.AddArg(x) 2457 v.AddArg(y) 2458 return true 2459 } 2460 // match: (CMPQconst (ANDQconst [c] x) [0]) 2461 // cond: 2462 // result: (TESTQconst [c] x) 2463 for { 2464 if v.AuxInt != 0 { 2465 break 2466 } 2467 v_0 := v.Args[0] 2468 if v_0.Op != OpAMD64ANDQconst { 2469 break 2470 } 2471 c := v_0.AuxInt 2472 x := v_0.Args[0] 2473 v.reset(OpAMD64TESTQconst) 2474 v.AuxInt = c 2475 v.AddArg(x) 2476 return true 2477 } 2478 // match: (CMPQconst x [0]) 2479 // cond: 2480 // result: (TESTQ x x) 2481 for { 2482 if v.AuxInt != 0 { 2483 break 2484 } 2485 x := v.Args[0] 2486 v.reset(OpAMD64TESTQ) 2487 v.AddArg(x) 2488 v.AddArg(x) 2489 return true 2490 } 2491 return false 2492 } 2493 func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { 2494 b := v.Block 2495 _ = b 2496 // match: (CMPW x (MOVLconst [c])) 2497 // cond: 2498 // result: (CMPWconst x [int64(int16(c))]) 2499 for { 2500 x := v.Args[0] 2501 v_1 := v.Args[1] 2502 if v_1.Op != OpAMD64MOVLconst { 2503 break 2504 } 2505 c := v_1.AuxInt 2506 v.reset(OpAMD64CMPWconst) 2507 v.AuxInt = int64(int16(c)) 2508 v.AddArg(x) 2509 return true 2510 } 2511 // match: (CMPW (MOVLconst [c]) x) 2512 // cond: 2513 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 2514 for { 2515 v_0 := v.Args[0] 2516 if v_0.Op != OpAMD64MOVLconst { 2517 break 2518 } 2519 c := v_0.AuxInt 2520 x := v.Args[1] 2521 v.reset(OpAMD64InvertFlags) 2522 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 2523 v0.AuxInt = int64(int16(c)) 2524 v0.AddArg(x) 2525 v.AddArg(v0) 2526 return true 2527 } 2528 return false 2529 } 2530 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { 2531 b := v.Block 2532 _ = b 2533 // match: (CMPWconst (MOVLconst [x]) [y]) 2534 // cond: int16(x)==int16(y) 2535 // result: (FlagEQ) 2536 for { 2537 y := v.AuxInt 2538 v_0 := v.Args[0] 2539 if v_0.Op != OpAMD64MOVLconst { 2540 break 2541 } 2542 x := v_0.AuxInt 2543 if !(int16(x) == int16(y)) { 2544 break 2545 } 2546 v.reset(OpAMD64FlagEQ) 2547 return true 2548 } 2549 // match: (CMPWconst (MOVLconst [x]) [y]) 2550 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 2551 // result: (FlagLT_ULT) 2552 for { 2553 y := v.AuxInt 2554 v_0 := v.Args[0] 2555 if v_0.Op != OpAMD64MOVLconst { 2556 break 2557 } 2558 x := v_0.AuxInt 2559 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 2560 break 2561 } 2562 v.reset(OpAMD64FlagLT_ULT) 2563 return true 2564 } 2565 // match: (CMPWconst (MOVLconst [x]) [y]) 2566 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 2567 // result: (FlagLT_UGT) 2568 for { 2569 y := v.AuxInt 2570 v_0 := v.Args[0] 2571 if v_0.Op != OpAMD64MOVLconst { 2572 break 2573 } 2574 x := v_0.AuxInt 2575 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 2576 break 2577 } 2578 v.reset(OpAMD64FlagLT_UGT) 2579 return true 2580 } 2581 // match: (CMPWconst (MOVLconst [x]) [y]) 2582 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 2583 // result: (FlagGT_ULT) 2584 for { 2585 y := v.AuxInt 2586 v_0 := v.Args[0] 2587 if v_0.Op != OpAMD64MOVLconst { 2588 break 2589 } 2590 x := v_0.AuxInt 2591 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 2592 break 2593 } 2594 v.reset(OpAMD64FlagGT_ULT) 2595 return true 2596 } 2597 // match: (CMPWconst (MOVLconst [x]) [y]) 2598 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 2599 // result: (FlagGT_UGT) 2600 for { 2601 y := v.AuxInt 2602 v_0 := v.Args[0] 2603 if v_0.Op != OpAMD64MOVLconst { 2604 break 2605 } 2606 x := v_0.AuxInt 2607 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 2608 break 2609 } 2610 v.reset(OpAMD64FlagGT_UGT) 2611 return true 2612 } 2613 // match: (CMPWconst (ANDLconst _ [m]) [n]) 2614 // cond: 0 <= int16(m) && int16(m) < int16(n) 2615 // result: (FlagLT_ULT) 2616 for { 2617 n := v.AuxInt 2618 v_0 := v.Args[0] 2619 if v_0.Op != OpAMD64ANDLconst { 2620 break 2621 } 2622 m := v_0.AuxInt 2623 if !(0 <= int16(m) && int16(m) < int16(n)) { 2624 break 2625 } 2626 v.reset(OpAMD64FlagLT_ULT) 2627 return true 2628 } 2629 // match: (CMPWconst (ANDL x y) [0]) 2630 // cond: 2631 // result: (TESTW x y) 2632 for { 2633 if v.AuxInt != 0 { 2634 break 2635 } 2636 v_0 := v.Args[0] 2637 if v_0.Op != OpAMD64ANDL { 2638 break 2639 } 2640 x := v_0.Args[0] 2641 y := v_0.Args[1] 2642 v.reset(OpAMD64TESTW) 2643 v.AddArg(x) 2644 v.AddArg(y) 2645 return true 2646 } 2647 // match: (CMPWconst (ANDLconst [c] x) [0]) 2648 // cond: 2649 // result: (TESTWconst [int64(int16(c))] x) 2650 for { 2651 if v.AuxInt != 0 { 2652 break 2653 } 2654 v_0 := v.Args[0] 2655 if v_0.Op != OpAMD64ANDLconst { 2656 break 2657 } 2658 c := v_0.AuxInt 2659 x := v_0.Args[0] 2660 v.reset(OpAMD64TESTWconst) 2661 v.AuxInt = int64(int16(c)) 2662 v.AddArg(x) 2663 return true 2664 } 2665 // match: (CMPWconst x [0]) 2666 // cond: 2667 // result: (TESTW x x) 2668 for { 2669 if v.AuxInt != 0 { 2670 break 2671 } 2672 x := v.Args[0] 2673 v.reset(OpAMD64TESTW) 2674 v.AddArg(x) 2675 v.AddArg(x) 2676 return true 2677 } 2678 return false 2679 } 2680 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool { 2681 b := v.Block 2682 _ = b 2683 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2684 // cond: is32Bit(off1+off2) 2685 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 2686 for { 2687 off1 := v.AuxInt 2688 sym := v.Aux 2689 v_0 := v.Args[0] 2690 if v_0.Op != OpAMD64ADDQconst { 2691 break 2692 } 2693 off2 := v_0.AuxInt 2694 ptr := v_0.Args[0] 2695 old := v.Args[1] 2696 new_ := v.Args[2] 2697 mem := v.Args[3] 2698 if !(is32Bit(off1 + off2)) { 2699 break 2700 } 2701 v.reset(OpAMD64CMPXCHGLlock) 2702 v.AuxInt = off1 + off2 2703 v.Aux = sym 2704 v.AddArg(ptr) 2705 v.AddArg(old) 2706 v.AddArg(new_) 2707 v.AddArg(mem) 2708 return true 2709 } 2710 return false 2711 } 2712 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool { 2713 b := v.Block 2714 _ = b 2715 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 2716 // cond: is32Bit(off1+off2) 2717 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 2718 for { 2719 off1 := v.AuxInt 2720 sym := v.Aux 2721 v_0 := v.Args[0] 2722 if v_0.Op != OpAMD64ADDQconst { 2723 break 2724 } 2725 off2 := v_0.AuxInt 2726 ptr := v_0.Args[0] 2727 old := v.Args[1] 2728 new_ := v.Args[2] 2729 mem := v.Args[3] 2730 if !(is32Bit(off1 + off2)) { 2731 break 2732 } 2733 v.reset(OpAMD64CMPXCHGQlock) 2734 v.AuxInt = off1 + off2 2735 v.Aux = sym 2736 v.AddArg(ptr) 2737 v.AddArg(old) 2738 v.AddArg(new_) 2739 v.AddArg(mem) 2740 return true 2741 } 2742 return false 2743 } 2744 func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool { 2745 b := v.Block 2746 _ = b 2747 // match: (LEAL [c] {s} (ADDLconst [d] x)) 2748 // cond: is32Bit(c+d) 2749 // result: (LEAL [c+d] {s} x) 2750 for { 2751 c := v.AuxInt 2752 s := v.Aux 2753 v_0 := v.Args[0] 2754 if v_0.Op != OpAMD64ADDLconst { 2755 break 2756 } 2757 d := v_0.AuxInt 2758 x := v_0.Args[0] 2759 if !(is32Bit(c + d)) { 2760 break 2761 } 2762 v.reset(OpAMD64LEAL) 2763 v.AuxInt = c + d 2764 v.Aux = s 2765 v.AddArg(x) 2766 return true 2767 } 2768 return false 2769 } 2770 func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool { 2771 b := v.Block 2772 _ = b 2773 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 2774 // cond: is32Bit(c+d) 2775 // result: (LEAQ [c+d] {s} x) 2776 for { 2777 c := v.AuxInt 2778 s := v.Aux 2779 v_0 := v.Args[0] 2780 if v_0.Op != OpAMD64ADDQconst { 2781 break 2782 } 2783 d := v_0.AuxInt 2784 x := v_0.Args[0] 2785 if !(is32Bit(c + d)) { 2786 break 2787 } 2788 v.reset(OpAMD64LEAQ) 2789 v.AuxInt = c + d 2790 v.Aux = s 2791 v.AddArg(x) 2792 return true 2793 } 2794 // match: (LEAQ [c] {s} (ADDQ x y)) 2795 // cond: x.Op != OpSB && y.Op != OpSB 2796 // result: (LEAQ1 [c] {s} x y) 2797 for { 2798 c := v.AuxInt 2799 s := v.Aux 2800 v_0 := v.Args[0] 2801 if v_0.Op != OpAMD64ADDQ { 2802 break 2803 } 2804 x := v_0.Args[0] 2805 y := v_0.Args[1] 2806 if !(x.Op != OpSB && y.Op != OpSB) { 2807 break 2808 } 2809 v.reset(OpAMD64LEAQ1) 2810 v.AuxInt = c 2811 v.Aux = s 2812 v.AddArg(x) 2813 v.AddArg(y) 2814 return true 2815 } 2816 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 2817 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2818 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 2819 for { 2820 off1 := v.AuxInt 2821 sym1 := v.Aux 2822 v_0 := v.Args[0] 2823 if v_0.Op != OpAMD64LEAQ { 2824 break 2825 } 2826 off2 := v_0.AuxInt 2827 sym2 := v_0.Aux 2828 x := v_0.Args[0] 2829 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2830 break 2831 } 2832 v.reset(OpAMD64LEAQ) 2833 v.AuxInt = off1 + off2 2834 v.Aux = mergeSym(sym1, sym2) 2835 v.AddArg(x) 2836 return true 2837 } 2838 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 2839 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2840 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 2841 for { 2842 off1 := v.AuxInt 2843 sym1 := v.Aux 2844 v_0 := v.Args[0] 2845 if v_0.Op != OpAMD64LEAQ1 { 2846 break 2847 } 2848 off2 := v_0.AuxInt 2849 sym2 := v_0.Aux 2850 x := v_0.Args[0] 2851 y := v_0.Args[1] 2852 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2853 break 2854 } 2855 v.reset(OpAMD64LEAQ1) 2856 v.AuxInt = off1 + off2 2857 v.Aux = mergeSym(sym1, sym2) 2858 v.AddArg(x) 2859 v.AddArg(y) 2860 return true 2861 } 2862 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 2863 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2864 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 2865 for { 2866 off1 := v.AuxInt 2867 sym1 := v.Aux 2868 v_0 := v.Args[0] 2869 if v_0.Op != OpAMD64LEAQ2 { 2870 break 2871 } 2872 off2 := v_0.AuxInt 2873 sym2 := v_0.Aux 2874 x := v_0.Args[0] 2875 y := v_0.Args[1] 2876 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2877 break 2878 } 2879 v.reset(OpAMD64LEAQ2) 2880 v.AuxInt = off1 + off2 2881 v.Aux = mergeSym(sym1, sym2) 2882 v.AddArg(x) 2883 v.AddArg(y) 2884 return true 2885 } 2886 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 2887 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2888 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 2889 for { 2890 off1 := v.AuxInt 2891 sym1 := v.Aux 2892 v_0 := v.Args[0] 2893 if v_0.Op != OpAMD64LEAQ4 { 2894 break 2895 } 2896 off2 := v_0.AuxInt 2897 sym2 := v_0.Aux 2898 x := v_0.Args[0] 2899 y := v_0.Args[1] 2900 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2901 break 2902 } 2903 v.reset(OpAMD64LEAQ4) 2904 v.AuxInt = off1 + off2 2905 v.Aux = mergeSym(sym1, sym2) 2906 v.AddArg(x) 2907 v.AddArg(y) 2908 return true 2909 } 2910 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 2911 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 2912 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 2913 for { 2914 off1 := v.AuxInt 2915 sym1 := v.Aux 2916 v_0 := v.Args[0] 2917 if v_0.Op != OpAMD64LEAQ8 { 2918 break 2919 } 2920 off2 := v_0.AuxInt 2921 sym2 := v_0.Aux 2922 x := v_0.Args[0] 2923 y := v_0.Args[1] 2924 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 2925 break 2926 } 2927 v.reset(OpAMD64LEAQ8) 2928 v.AuxInt = off1 + off2 2929 v.Aux = mergeSym(sym1, sym2) 2930 v.AddArg(x) 2931 v.AddArg(y) 2932 return true 2933 } 2934 return false 2935 } 2936 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool { 2937 b := v.Block 2938 _ = b 2939 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 2940 // cond: is32Bit(c+d) && x.Op != OpSB 2941 // result: (LEAQ1 [c+d] {s} x y) 2942 for { 2943 c := v.AuxInt 2944 s := v.Aux 2945 v_0 := v.Args[0] 2946 if v_0.Op != OpAMD64ADDQconst { 2947 break 2948 } 2949 d := v_0.AuxInt 2950 x := v_0.Args[0] 2951 y := v.Args[1] 2952 if !(is32Bit(c+d) && x.Op != OpSB) { 2953 break 2954 } 2955 v.reset(OpAMD64LEAQ1) 2956 v.AuxInt = c + d 2957 v.Aux = s 2958 v.AddArg(x) 2959 v.AddArg(y) 2960 return true 2961 } 2962 // match: (LEAQ1 [c] {s} x (ADDQconst [d] y)) 2963 // cond: is32Bit(c+d) && y.Op != OpSB 2964 // result: (LEAQ1 [c+d] {s} x y) 2965 for { 2966 c := v.AuxInt 2967 s := v.Aux 2968 x := v.Args[0] 2969 v_1 := v.Args[1] 2970 if v_1.Op != OpAMD64ADDQconst { 2971 break 2972 } 2973 d := v_1.AuxInt 2974 y := v_1.Args[0] 2975 if !(is32Bit(c+d) && y.Op != OpSB) { 2976 break 2977 } 2978 v.reset(OpAMD64LEAQ1) 2979 v.AuxInt = c + d 2980 v.Aux = s 2981 v.AddArg(x) 2982 v.AddArg(y) 2983 return true 2984 } 2985 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 2986 // cond: 2987 // result: (LEAQ2 [c] {s} x y) 2988 for { 2989 c := v.AuxInt 2990 s := v.Aux 2991 x := v.Args[0] 2992 v_1 := v.Args[1] 2993 if v_1.Op != OpAMD64SHLQconst { 2994 break 2995 } 2996 if v_1.AuxInt != 1 { 2997 break 2998 } 2999 y := v_1.Args[0] 3000 v.reset(OpAMD64LEAQ2) 3001 v.AuxInt = c 3002 v.Aux = s 3003 v.AddArg(x) 3004 v.AddArg(y) 3005 return true 3006 } 3007 // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) 3008 // cond: 3009 // result: (LEAQ2 [c] {s} y x) 3010 for { 3011 c := v.AuxInt 3012 s := v.Aux 3013 v_0 := v.Args[0] 3014 if v_0.Op != OpAMD64SHLQconst { 3015 break 3016 } 3017 if v_0.AuxInt != 1 { 3018 break 3019 } 3020 x := v_0.Args[0] 3021 y := v.Args[1] 3022 v.reset(OpAMD64LEAQ2) 3023 v.AuxInt = c 3024 v.Aux = s 3025 v.AddArg(y) 3026 v.AddArg(x) 3027 return true 3028 } 3029 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 3030 // cond: 3031 // result: (LEAQ4 [c] {s} x y) 3032 for { 3033 c := v.AuxInt 3034 s := v.Aux 3035 x := v.Args[0] 3036 v_1 := v.Args[1] 3037 if v_1.Op != OpAMD64SHLQconst { 3038 break 3039 } 3040 if v_1.AuxInt != 2 { 3041 break 3042 } 3043 y := v_1.Args[0] 3044 v.reset(OpAMD64LEAQ4) 3045 v.AuxInt = c 3046 v.Aux = s 3047 v.AddArg(x) 3048 v.AddArg(y) 3049 return true 3050 } 3051 // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) 3052 // cond: 3053 // result: (LEAQ4 [c] {s} y x) 3054 for { 3055 c := v.AuxInt 3056 s := v.Aux 3057 v_0 := v.Args[0] 3058 if v_0.Op != OpAMD64SHLQconst { 3059 break 3060 } 3061 if v_0.AuxInt != 2 { 3062 break 3063 } 3064 x := v_0.Args[0] 3065 y := v.Args[1] 3066 v.reset(OpAMD64LEAQ4) 3067 v.AuxInt = c 3068 v.Aux = s 3069 v.AddArg(y) 3070 v.AddArg(x) 3071 return true 3072 } 3073 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 3074 // cond: 3075 // result: (LEAQ8 [c] {s} x y) 3076 for { 3077 c := v.AuxInt 3078 s := v.Aux 3079 x := v.Args[0] 3080 v_1 := v.Args[1] 3081 if v_1.Op != OpAMD64SHLQconst { 3082 break 3083 } 3084 if v_1.AuxInt != 3 { 3085 break 3086 } 3087 y := v_1.Args[0] 3088 v.reset(OpAMD64LEAQ8) 3089 v.AuxInt = c 3090 v.Aux = s 3091 v.AddArg(x) 3092 v.AddArg(y) 3093 return true 3094 } 3095 // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y) 3096 // cond: 3097 // result: (LEAQ8 [c] {s} y x) 3098 for { 3099 c := v.AuxInt 3100 s := v.Aux 3101 v_0 := v.Args[0] 3102 if v_0.Op != OpAMD64SHLQconst { 3103 break 3104 } 3105 if v_0.AuxInt != 3 { 3106 break 3107 } 3108 x := v_0.Args[0] 3109 y := v.Args[1] 3110 v.reset(OpAMD64LEAQ8) 3111 v.AuxInt = c 3112 v.Aux = s 3113 v.AddArg(y) 3114 v.AddArg(x) 3115 return true 3116 } 3117 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3118 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3119 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3120 for { 3121 off1 := v.AuxInt 3122 sym1 := v.Aux 3123 v_0 := v.Args[0] 3124 if v_0.Op != OpAMD64LEAQ { 3125 break 3126 } 3127 off2 := v_0.AuxInt 3128 sym2 := v_0.Aux 3129 x := v_0.Args[0] 3130 y := v.Args[1] 3131 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3132 break 3133 } 3134 v.reset(OpAMD64LEAQ1) 3135 v.AuxInt = off1 + off2 3136 v.Aux = mergeSym(sym1, sym2) 3137 v.AddArg(x) 3138 v.AddArg(y) 3139 return true 3140 } 3141 // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) 3142 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB 3143 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3144 for { 3145 off1 := v.AuxInt 3146 sym1 := v.Aux 3147 x := v.Args[0] 3148 v_1 := v.Args[1] 3149 if v_1.Op != OpAMD64LEAQ { 3150 break 3151 } 3152 off2 := v_1.AuxInt 3153 sym2 := v_1.Aux 3154 y := v_1.Args[0] 3155 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { 3156 break 3157 } 3158 v.reset(OpAMD64LEAQ1) 3159 v.AuxInt = off1 + off2 3160 v.Aux = mergeSym(sym1, sym2) 3161 v.AddArg(x) 3162 v.AddArg(y) 3163 return true 3164 } 3165 return false 3166 } 3167 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool { 3168 b := v.Block 3169 _ = b 3170 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 3171 // cond: is32Bit(c+d) && x.Op != OpSB 3172 // result: (LEAQ2 [c+d] {s} x y) 3173 for { 3174 c := v.AuxInt 3175 s := v.Aux 3176 v_0 := v.Args[0] 3177 if v_0.Op != OpAMD64ADDQconst { 3178 break 3179 } 3180 d := v_0.AuxInt 3181 x := v_0.Args[0] 3182 y := v.Args[1] 3183 if !(is32Bit(c+d) && x.Op != OpSB) { 3184 break 3185 } 3186 v.reset(OpAMD64LEAQ2) 3187 v.AuxInt = c + d 3188 v.Aux = s 3189 v.AddArg(x) 3190 v.AddArg(y) 3191 return true 3192 } 3193 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 3194 // cond: is32Bit(c+2*d) && y.Op != OpSB 3195 // result: (LEAQ2 [c+2*d] {s} x y) 3196 for { 3197 c := v.AuxInt 3198 s := v.Aux 3199 x := v.Args[0] 3200 v_1 := v.Args[1] 3201 if v_1.Op != OpAMD64ADDQconst { 3202 break 3203 } 3204 d := v_1.AuxInt 3205 y := v_1.Args[0] 3206 if !(is32Bit(c+2*d) && y.Op != OpSB) { 3207 break 3208 } 3209 v.reset(OpAMD64LEAQ2) 3210 v.AuxInt = c + 2*d 3211 v.Aux = s 3212 v.AddArg(x) 3213 v.AddArg(y) 3214 return true 3215 } 3216 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 3217 // cond: 3218 // result: (LEAQ4 [c] {s} x y) 3219 for { 3220 c := v.AuxInt 3221 s := v.Aux 3222 x := v.Args[0] 3223 v_1 := v.Args[1] 3224 if v_1.Op != OpAMD64SHLQconst { 3225 break 3226 } 3227 if v_1.AuxInt != 1 { 3228 break 3229 } 3230 y := v_1.Args[0] 3231 v.reset(OpAMD64LEAQ4) 3232 v.AuxInt = c 3233 v.Aux = s 3234 v.AddArg(x) 3235 v.AddArg(y) 3236 return true 3237 } 3238 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 3239 // cond: 3240 // result: (LEAQ8 [c] {s} x y) 3241 for { 3242 c := v.AuxInt 3243 s := v.Aux 3244 x := v.Args[0] 3245 v_1 := v.Args[1] 3246 if v_1.Op != OpAMD64SHLQconst { 3247 break 3248 } 3249 if v_1.AuxInt != 2 { 3250 break 3251 } 3252 y := v_1.Args[0] 3253 v.reset(OpAMD64LEAQ8) 3254 v.AuxInt = c 3255 v.Aux = s 3256 v.AddArg(x) 3257 v.AddArg(y) 3258 return true 3259 } 3260 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3261 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3262 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3263 for { 3264 off1 := v.AuxInt 3265 sym1 := v.Aux 3266 v_0 := v.Args[0] 3267 if v_0.Op != OpAMD64LEAQ { 3268 break 3269 } 3270 off2 := v_0.AuxInt 3271 sym2 := v_0.Aux 3272 x := v_0.Args[0] 3273 y := v.Args[1] 3274 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3275 break 3276 } 3277 v.reset(OpAMD64LEAQ2) 3278 v.AuxInt = off1 + off2 3279 v.Aux = mergeSym(sym1, sym2) 3280 v.AddArg(x) 3281 v.AddArg(y) 3282 return true 3283 } 3284 return false 3285 } 3286 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool { 3287 b := v.Block 3288 _ = b 3289 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 3290 // cond: is32Bit(c+d) && x.Op != OpSB 3291 // result: (LEAQ4 [c+d] {s} x y) 3292 for { 3293 c := v.AuxInt 3294 s := v.Aux 3295 v_0 := v.Args[0] 3296 if v_0.Op != OpAMD64ADDQconst { 3297 break 3298 } 3299 d := v_0.AuxInt 3300 x := v_0.Args[0] 3301 y := v.Args[1] 3302 if !(is32Bit(c+d) && x.Op != OpSB) { 3303 break 3304 } 3305 v.reset(OpAMD64LEAQ4) 3306 v.AuxInt = c + d 3307 v.Aux = s 3308 v.AddArg(x) 3309 v.AddArg(y) 3310 return true 3311 } 3312 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 3313 // cond: is32Bit(c+4*d) && y.Op != OpSB 3314 // result: (LEAQ4 [c+4*d] {s} x y) 3315 for { 3316 c := v.AuxInt 3317 s := v.Aux 3318 x := v.Args[0] 3319 v_1 := v.Args[1] 3320 if v_1.Op != OpAMD64ADDQconst { 3321 break 3322 } 3323 d := v_1.AuxInt 3324 y := v_1.Args[0] 3325 if !(is32Bit(c+4*d) && y.Op != OpSB) { 3326 break 3327 } 3328 v.reset(OpAMD64LEAQ4) 3329 v.AuxInt = c + 4*d 3330 v.Aux = s 3331 v.AddArg(x) 3332 v.AddArg(y) 3333 return true 3334 } 3335 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 3336 // cond: 3337 // result: (LEAQ8 [c] {s} x y) 3338 for { 3339 c := v.AuxInt 3340 s := v.Aux 3341 x := v.Args[0] 3342 v_1 := v.Args[1] 3343 if v_1.Op != OpAMD64SHLQconst { 3344 break 3345 } 3346 if v_1.AuxInt != 1 { 3347 break 3348 } 3349 y := v_1.Args[0] 3350 v.reset(OpAMD64LEAQ8) 3351 v.AuxInt = c 3352 v.Aux = s 3353 v.AddArg(x) 3354 v.AddArg(y) 3355 return true 3356 } 3357 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3358 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3359 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3360 for { 3361 off1 := v.AuxInt 3362 sym1 := v.Aux 3363 v_0 := v.Args[0] 3364 if v_0.Op != OpAMD64LEAQ { 3365 break 3366 } 3367 off2 := v_0.AuxInt 3368 sym2 := v_0.Aux 3369 x := v_0.Args[0] 3370 y := v.Args[1] 3371 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3372 break 3373 } 3374 v.reset(OpAMD64LEAQ4) 3375 v.AuxInt = off1 + off2 3376 v.Aux = mergeSym(sym1, sym2) 3377 v.AddArg(x) 3378 v.AddArg(y) 3379 return true 3380 } 3381 return false 3382 } 3383 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool { 3384 b := v.Block 3385 _ = b 3386 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 3387 // cond: is32Bit(c+d) && x.Op != OpSB 3388 // result: (LEAQ8 [c+d] {s} x y) 3389 for { 3390 c := v.AuxInt 3391 s := v.Aux 3392 v_0 := v.Args[0] 3393 if v_0.Op != OpAMD64ADDQconst { 3394 break 3395 } 3396 d := v_0.AuxInt 3397 x := v_0.Args[0] 3398 y := v.Args[1] 3399 if !(is32Bit(c+d) && x.Op != OpSB) { 3400 break 3401 } 3402 v.reset(OpAMD64LEAQ8) 3403 v.AuxInt = c + d 3404 v.Aux = s 3405 v.AddArg(x) 3406 v.AddArg(y) 3407 return true 3408 } 3409 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 3410 // cond: is32Bit(c+8*d) && y.Op != OpSB 3411 // result: (LEAQ8 [c+8*d] {s} x y) 3412 for { 3413 c := v.AuxInt 3414 s := v.Aux 3415 x := v.Args[0] 3416 v_1 := v.Args[1] 3417 if v_1.Op != OpAMD64ADDQconst { 3418 break 3419 } 3420 d := v_1.AuxInt 3421 y := v_1.Args[0] 3422 if !(is32Bit(c+8*d) && y.Op != OpSB) { 3423 break 3424 } 3425 v.reset(OpAMD64LEAQ8) 3426 v.AuxInt = c + 8*d 3427 v.Aux = s 3428 v.AddArg(x) 3429 v.AddArg(y) 3430 return true 3431 } 3432 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 3433 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 3434 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 3435 for { 3436 off1 := v.AuxInt 3437 sym1 := v.Aux 3438 v_0 := v.Args[0] 3439 if v_0.Op != OpAMD64LEAQ { 3440 break 3441 } 3442 off2 := v_0.AuxInt 3443 sym2 := v_0.Aux 3444 x := v_0.Args[0] 3445 y := v.Args[1] 3446 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 3447 break 3448 } 3449 v.reset(OpAMD64LEAQ8) 3450 v.AuxInt = off1 + off2 3451 v.Aux = mergeSym(sym1, sym2) 3452 v.AddArg(x) 3453 v.AddArg(y) 3454 return true 3455 } 3456 return false 3457 } 3458 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { 3459 b := v.Block 3460 _ = b 3461 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 3462 // cond: x.Uses == 1 && clobber(x) 3463 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3464 for { 3465 x := v.Args[0] 3466 if x.Op != OpAMD64MOVBload { 3467 break 3468 } 3469 off := x.AuxInt 3470 sym := x.Aux 3471 ptr := x.Args[0] 3472 mem := x.Args[1] 3473 if !(x.Uses == 1 && clobber(x)) { 3474 break 3475 } 3476 b = x.Block 3477 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 3478 v.reset(OpCopy) 3479 v.AddArg(v0) 3480 v0.AuxInt = off 3481 v0.Aux = sym 3482 v0.AddArg(ptr) 3483 v0.AddArg(mem) 3484 return true 3485 } 3486 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 3487 // cond: x.Uses == 1 && clobber(x) 3488 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3489 for { 3490 x := v.Args[0] 3491 if x.Op != OpAMD64MOVWload { 3492 break 3493 } 3494 off := x.AuxInt 3495 sym := x.Aux 3496 ptr := x.Args[0] 3497 mem := x.Args[1] 3498 if !(x.Uses == 1 && clobber(x)) { 3499 break 3500 } 3501 b = x.Block 3502 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 3503 v.reset(OpCopy) 3504 v.AddArg(v0) 3505 v0.AuxInt = off 3506 v0.Aux = sym 3507 v0.AddArg(ptr) 3508 v0.AddArg(mem) 3509 return true 3510 } 3511 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) 3512 // cond: x.Uses == 1 && clobber(x) 3513 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3514 for { 3515 x := v.Args[0] 3516 if x.Op != OpAMD64MOVLload { 3517 break 3518 } 3519 off := x.AuxInt 3520 sym := x.Aux 3521 ptr := x.Args[0] 3522 mem := x.Args[1] 3523 if !(x.Uses == 1 && clobber(x)) { 3524 break 3525 } 3526 b = x.Block 3527 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 3528 v.reset(OpCopy) 3529 v.AddArg(v0) 3530 v0.AuxInt = off 3531 v0.Aux = sym 3532 v0.AddArg(ptr) 3533 v0.AddArg(mem) 3534 return true 3535 } 3536 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 3537 // cond: x.Uses == 1 && clobber(x) 3538 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 3539 for { 3540 x := v.Args[0] 3541 if x.Op != OpAMD64MOVQload { 3542 break 3543 } 3544 off := x.AuxInt 3545 sym := x.Aux 3546 ptr := x.Args[0] 3547 mem := x.Args[1] 3548 if !(x.Uses == 1 && clobber(x)) { 3549 break 3550 } 3551 b = x.Block 3552 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 3553 v.reset(OpCopy) 3554 v.AddArg(v0) 3555 v0.AuxInt = off 3556 v0.Aux = sym 3557 v0.AddArg(ptr) 3558 v0.AddArg(mem) 3559 return true 3560 } 3561 // match: (MOVBQSX (ANDLconst [c] x)) 3562 // cond: c & 0x80 == 0 3563 // result: (ANDLconst [c & 0x7f] x) 3564 for { 3565 v_0 := v.Args[0] 3566 if v_0.Op != OpAMD64ANDLconst { 3567 break 3568 } 3569 c := v_0.AuxInt 3570 x := v_0.Args[0] 3571 if !(c&0x80 == 0) { 3572 break 3573 } 3574 v.reset(OpAMD64ANDLconst) 3575 v.AuxInt = c & 0x7f 3576 v.AddArg(x) 3577 return true 3578 } 3579 return false 3580 } 3581 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool { 3582 b := v.Block 3583 _ = b 3584 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3585 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3586 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3587 for { 3588 off1 := v.AuxInt 3589 sym1 := v.Aux 3590 v_0 := v.Args[0] 3591 if v_0.Op != OpAMD64LEAQ { 3592 break 3593 } 3594 off2 := v_0.AuxInt 3595 sym2 := v_0.Aux 3596 base := v_0.Args[0] 3597 mem := v.Args[1] 3598 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3599 break 3600 } 3601 v.reset(OpAMD64MOVBQSXload) 3602 v.AuxInt = off1 + off2 3603 v.Aux = mergeSym(sym1, sym2) 3604 v.AddArg(base) 3605 v.AddArg(mem) 3606 return true 3607 } 3608 return false 3609 } 3610 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { 3611 b := v.Block 3612 _ = b 3613 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 3614 // cond: x.Uses == 1 && clobber(x) 3615 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3616 for { 3617 x := v.Args[0] 3618 if x.Op != OpAMD64MOVBload { 3619 break 3620 } 3621 off := x.AuxInt 3622 sym := x.Aux 3623 ptr := x.Args[0] 3624 mem := x.Args[1] 3625 if !(x.Uses == 1 && clobber(x)) { 3626 break 3627 } 3628 b = x.Block 3629 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 3630 v.reset(OpCopy) 3631 v.AddArg(v0) 3632 v0.AuxInt = off 3633 v0.Aux = sym 3634 v0.AddArg(ptr) 3635 v0.AddArg(mem) 3636 return true 3637 } 3638 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 3639 // cond: x.Uses == 1 && clobber(x) 3640 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3641 for { 3642 x := v.Args[0] 3643 if x.Op != OpAMD64MOVWload { 3644 break 3645 } 3646 off := x.AuxInt 3647 sym := x.Aux 3648 ptr := x.Args[0] 3649 mem := x.Args[1] 3650 if !(x.Uses == 1 && clobber(x)) { 3651 break 3652 } 3653 b = x.Block 3654 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 3655 v.reset(OpCopy) 3656 v.AddArg(v0) 3657 v0.AuxInt = off 3658 v0.Aux = sym 3659 v0.AddArg(ptr) 3660 v0.AddArg(mem) 3661 return true 3662 } 3663 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 3664 // cond: x.Uses == 1 && clobber(x) 3665 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3666 for { 3667 x := v.Args[0] 3668 if x.Op != OpAMD64MOVLload { 3669 break 3670 } 3671 off := x.AuxInt 3672 sym := x.Aux 3673 ptr := x.Args[0] 3674 mem := x.Args[1] 3675 if !(x.Uses == 1 && clobber(x)) { 3676 break 3677 } 3678 b = x.Block 3679 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 3680 v.reset(OpCopy) 3681 v.AddArg(v0) 3682 v0.AuxInt = off 3683 v0.Aux = sym 3684 v0.AddArg(ptr) 3685 v0.AddArg(mem) 3686 return true 3687 } 3688 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 3689 // cond: x.Uses == 1 && clobber(x) 3690 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 3691 for { 3692 x := v.Args[0] 3693 if x.Op != OpAMD64MOVQload { 3694 break 3695 } 3696 off := x.AuxInt 3697 sym := x.Aux 3698 ptr := x.Args[0] 3699 mem := x.Args[1] 3700 if !(x.Uses == 1 && clobber(x)) { 3701 break 3702 } 3703 b = x.Block 3704 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 3705 v.reset(OpCopy) 3706 v.AddArg(v0) 3707 v0.AuxInt = off 3708 v0.Aux = sym 3709 v0.AddArg(ptr) 3710 v0.AddArg(mem) 3711 return true 3712 } 3713 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 3714 // cond: x.Uses == 1 && clobber(x) 3715 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 3716 for { 3717 x := v.Args[0] 3718 if x.Op != OpAMD64MOVBloadidx1 { 3719 break 3720 } 3721 off := x.AuxInt 3722 sym := x.Aux 3723 ptr := x.Args[0] 3724 idx := x.Args[1] 3725 mem := x.Args[2] 3726 if !(x.Uses == 1 && clobber(x)) { 3727 break 3728 } 3729 b = x.Block 3730 v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type) 3731 v.reset(OpCopy) 3732 v.AddArg(v0) 3733 v0.AuxInt = off 3734 v0.Aux = sym 3735 v0.AddArg(ptr) 3736 v0.AddArg(idx) 3737 v0.AddArg(mem) 3738 return true 3739 } 3740 // match: (MOVBQZX (ANDLconst [c] x)) 3741 // cond: 3742 // result: (ANDLconst [c & 0xff] x) 3743 for { 3744 v_0 := v.Args[0] 3745 if v_0.Op != OpAMD64ANDLconst { 3746 break 3747 } 3748 c := v_0.AuxInt 3749 x := v_0.Args[0] 3750 v.reset(OpAMD64ANDLconst) 3751 v.AuxInt = c & 0xff 3752 v.AddArg(x) 3753 return true 3754 } 3755 return false 3756 } 3757 func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool { 3758 b := v.Block 3759 _ = b 3760 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 3761 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 3762 // result: x 3763 for { 3764 off := v.AuxInt 3765 sym := v.Aux 3766 ptr := v.Args[0] 3767 v_1 := v.Args[1] 3768 if v_1.Op != OpAMD64MOVBstore { 3769 break 3770 } 3771 off2 := v_1.AuxInt 3772 sym2 := v_1.Aux 3773 ptr2 := v_1.Args[0] 3774 x := v_1.Args[1] 3775 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 3776 break 3777 } 3778 v.reset(OpCopy) 3779 v.Type = x.Type 3780 v.AddArg(x) 3781 return true 3782 } 3783 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 3784 // cond: is32Bit(off1+off2) 3785 // result: (MOVBload [off1+off2] {sym} ptr mem) 3786 for { 3787 off1 := v.AuxInt 3788 sym := v.Aux 3789 v_0 := v.Args[0] 3790 if v_0.Op != OpAMD64ADDQconst { 3791 break 3792 } 3793 off2 := v_0.AuxInt 3794 ptr := v_0.Args[0] 3795 mem := v.Args[1] 3796 if !(is32Bit(off1 + off2)) { 3797 break 3798 } 3799 v.reset(OpAMD64MOVBload) 3800 v.AuxInt = off1 + off2 3801 v.Aux = sym 3802 v.AddArg(ptr) 3803 v.AddArg(mem) 3804 return true 3805 } 3806 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 3807 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3808 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3809 for { 3810 off1 := v.AuxInt 3811 sym1 := v.Aux 3812 v_0 := v.Args[0] 3813 if v_0.Op != OpAMD64LEAQ { 3814 break 3815 } 3816 off2 := v_0.AuxInt 3817 sym2 := v_0.Aux 3818 base := v_0.Args[0] 3819 mem := v.Args[1] 3820 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3821 break 3822 } 3823 v.reset(OpAMD64MOVBload) 3824 v.AuxInt = off1 + off2 3825 v.Aux = mergeSym(sym1, sym2) 3826 v.AddArg(base) 3827 v.AddArg(mem) 3828 return true 3829 } 3830 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 3831 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3832 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 3833 for { 3834 off1 := v.AuxInt 3835 sym1 := v.Aux 3836 v_0 := v.Args[0] 3837 if v_0.Op != OpAMD64LEAQ1 { 3838 break 3839 } 3840 off2 := v_0.AuxInt 3841 sym2 := v_0.Aux 3842 ptr := v_0.Args[0] 3843 idx := v_0.Args[1] 3844 mem := v.Args[1] 3845 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3846 break 3847 } 3848 v.reset(OpAMD64MOVBloadidx1) 3849 v.AuxInt = off1 + off2 3850 v.Aux = mergeSym(sym1, sym2) 3851 v.AddArg(ptr) 3852 v.AddArg(idx) 3853 v.AddArg(mem) 3854 return true 3855 } 3856 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 3857 // cond: ptr.Op != OpSB 3858 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 3859 for { 3860 off := v.AuxInt 3861 sym := v.Aux 3862 v_0 := v.Args[0] 3863 if v_0.Op != OpAMD64ADDQ { 3864 break 3865 } 3866 ptr := v_0.Args[0] 3867 idx := v_0.Args[1] 3868 mem := v.Args[1] 3869 if !(ptr.Op != OpSB) { 3870 break 3871 } 3872 v.reset(OpAMD64MOVBloadidx1) 3873 v.AuxInt = off 3874 v.Aux = sym 3875 v.AddArg(ptr) 3876 v.AddArg(idx) 3877 v.AddArg(mem) 3878 return true 3879 } 3880 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 3881 // cond: canMergeSym(sym1, sym2) 3882 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 3883 for { 3884 off1 := v.AuxInt 3885 sym1 := v.Aux 3886 v_0 := v.Args[0] 3887 if v_0.Op != OpAMD64LEAL { 3888 break 3889 } 3890 off2 := v_0.AuxInt 3891 sym2 := v_0.Aux 3892 base := v_0.Args[0] 3893 mem := v.Args[1] 3894 if !(canMergeSym(sym1, sym2)) { 3895 break 3896 } 3897 v.reset(OpAMD64MOVBload) 3898 v.AuxInt = off1 + off2 3899 v.Aux = mergeSym(sym1, sym2) 3900 v.AddArg(base) 3901 v.AddArg(mem) 3902 return true 3903 } 3904 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 3905 // cond: is32Bit(off1+off2) 3906 // result: (MOVBload [off1+off2] {sym} ptr mem) 3907 for { 3908 off1 := v.AuxInt 3909 sym := v.Aux 3910 v_0 := v.Args[0] 3911 if v_0.Op != OpAMD64ADDLconst { 3912 break 3913 } 3914 off2 := v_0.AuxInt 3915 ptr := v_0.Args[0] 3916 mem := v.Args[1] 3917 if !(is32Bit(off1 + off2)) { 3918 break 3919 } 3920 v.reset(OpAMD64MOVBload) 3921 v.AuxInt = off1 + off2 3922 v.Aux = sym 3923 v.AddArg(ptr) 3924 v.AddArg(mem) 3925 return true 3926 } 3927 return false 3928 } 3929 func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { 3930 b := v.Block 3931 _ = b 3932 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 3933 // cond: 3934 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3935 for { 3936 c := v.AuxInt 3937 sym := v.Aux 3938 v_0 := v.Args[0] 3939 if v_0.Op != OpAMD64ADDQconst { 3940 break 3941 } 3942 d := v_0.AuxInt 3943 ptr := v_0.Args[0] 3944 idx := v.Args[1] 3945 mem := v.Args[2] 3946 v.reset(OpAMD64MOVBloadidx1) 3947 v.AuxInt = c + d 3948 v.Aux = sym 3949 v.AddArg(ptr) 3950 v.AddArg(idx) 3951 v.AddArg(mem) 3952 return true 3953 } 3954 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 3955 // cond: 3956 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 3957 for { 3958 c := v.AuxInt 3959 sym := v.Aux 3960 ptr := v.Args[0] 3961 v_1 := v.Args[1] 3962 if v_1.Op != OpAMD64ADDQconst { 3963 break 3964 } 3965 d := v_1.AuxInt 3966 idx := v_1.Args[0] 3967 mem := v.Args[2] 3968 v.reset(OpAMD64MOVBloadidx1) 3969 v.AuxInt = c + d 3970 v.Aux = sym 3971 v.AddArg(ptr) 3972 v.AddArg(idx) 3973 v.AddArg(mem) 3974 return true 3975 } 3976 return false 3977 } 3978 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { 3979 b := v.Block 3980 _ = b 3981 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 3982 // cond: 3983 // result: (MOVBstore [off] {sym} ptr x mem) 3984 for { 3985 off := v.AuxInt 3986 sym := v.Aux 3987 ptr := v.Args[0] 3988 v_1 := v.Args[1] 3989 if v_1.Op != OpAMD64MOVBQSX { 3990 break 3991 } 3992 x := v_1.Args[0] 3993 mem := v.Args[2] 3994 v.reset(OpAMD64MOVBstore) 3995 v.AuxInt = off 3996 v.Aux = sym 3997 v.AddArg(ptr) 3998 v.AddArg(x) 3999 v.AddArg(mem) 4000 return true 4001 } 4002 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 4003 // cond: 4004 // result: (MOVBstore [off] {sym} ptr x mem) 4005 for { 4006 off := v.AuxInt 4007 sym := v.Aux 4008 ptr := v.Args[0] 4009 v_1 := v.Args[1] 4010 if v_1.Op != OpAMD64MOVBQZX { 4011 break 4012 } 4013 x := v_1.Args[0] 4014 mem := v.Args[2] 4015 v.reset(OpAMD64MOVBstore) 4016 v.AuxInt = off 4017 v.Aux = sym 4018 v.AddArg(ptr) 4019 v.AddArg(x) 4020 v.AddArg(mem) 4021 return true 4022 } 4023 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 4024 // cond: is32Bit(off1+off2) 4025 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 4026 for { 4027 off1 := v.AuxInt 4028 sym := v.Aux 4029 v_0 := v.Args[0] 4030 if v_0.Op != OpAMD64ADDQconst { 4031 break 4032 } 4033 off2 := v_0.AuxInt 4034 ptr := v_0.Args[0] 4035 val := v.Args[1] 4036 mem := v.Args[2] 4037 if !(is32Bit(off1 + off2)) { 4038 break 4039 } 4040 v.reset(OpAMD64MOVBstore) 4041 v.AuxInt = off1 + off2 4042 v.Aux = sym 4043 v.AddArg(ptr) 4044 v.AddArg(val) 4045 v.AddArg(mem) 4046 return true 4047 } 4048 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 4049 // cond: validOff(off) 4050 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 4051 for { 4052 off := v.AuxInt 4053 sym := v.Aux 4054 ptr := v.Args[0] 4055 v_1 := v.Args[1] 4056 if v_1.Op != OpAMD64MOVLconst { 4057 break 4058 } 4059 c := v_1.AuxInt 4060 mem := v.Args[2] 4061 if !(validOff(off)) { 4062 break 4063 } 4064 v.reset(OpAMD64MOVBstoreconst) 4065 v.AuxInt = makeValAndOff(int64(int8(c)), off) 4066 v.Aux = sym 4067 v.AddArg(ptr) 4068 v.AddArg(mem) 4069 return true 4070 } 4071 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 4072 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4073 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4074 for { 4075 off1 := v.AuxInt 4076 sym1 := v.Aux 4077 v_0 := v.Args[0] 4078 if v_0.Op != OpAMD64LEAQ { 4079 break 4080 } 4081 off2 := v_0.AuxInt 4082 sym2 := v_0.Aux 4083 base := v_0.Args[0] 4084 val := v.Args[1] 4085 mem := v.Args[2] 4086 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4087 break 4088 } 4089 v.reset(OpAMD64MOVBstore) 4090 v.AuxInt = off1 + off2 4091 v.Aux = mergeSym(sym1, sym2) 4092 v.AddArg(base) 4093 v.AddArg(val) 4094 v.AddArg(mem) 4095 return true 4096 } 4097 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 4098 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4099 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 4100 for { 4101 off1 := v.AuxInt 4102 sym1 := v.Aux 4103 v_0 := v.Args[0] 4104 if v_0.Op != OpAMD64LEAQ1 { 4105 break 4106 } 4107 off2 := v_0.AuxInt 4108 sym2 := v_0.Aux 4109 ptr := v_0.Args[0] 4110 idx := v_0.Args[1] 4111 val := v.Args[1] 4112 mem := v.Args[2] 4113 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4114 break 4115 } 4116 v.reset(OpAMD64MOVBstoreidx1) 4117 v.AuxInt = off1 + off2 4118 v.Aux = mergeSym(sym1, sym2) 4119 v.AddArg(ptr) 4120 v.AddArg(idx) 4121 v.AddArg(val) 4122 v.AddArg(mem) 4123 return true 4124 } 4125 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 4126 // cond: ptr.Op != OpSB 4127 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 4128 for { 4129 off := v.AuxInt 4130 sym := v.Aux 4131 v_0 := v.Args[0] 4132 if v_0.Op != OpAMD64ADDQ { 4133 break 4134 } 4135 ptr := v_0.Args[0] 4136 idx := v_0.Args[1] 4137 val := v.Args[1] 4138 mem := v.Args[2] 4139 if !(ptr.Op != OpSB) { 4140 break 4141 } 4142 v.reset(OpAMD64MOVBstoreidx1) 4143 v.AuxInt = off 4144 v.Aux = sym 4145 v.AddArg(ptr) 4146 v.AddArg(idx) 4147 v.AddArg(val) 4148 v.AddArg(mem) 4149 return true 4150 } 4151 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 4152 // cond: x0.Uses == 1 && clobber(x0) 4153 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 4154 for { 4155 i := v.AuxInt 4156 s := v.Aux 4157 p := v.Args[0] 4158 w := v.Args[1] 4159 x0 := v.Args[2] 4160 if x0.Op != OpAMD64MOVBstore { 4161 break 4162 } 4163 if x0.AuxInt != i-1 { 4164 break 4165 } 4166 if x0.Aux != s { 4167 break 4168 } 4169 if p != x0.Args[0] { 4170 break 4171 } 4172 x0_1 := x0.Args[1] 4173 if x0_1.Op != OpAMD64SHRWconst { 4174 break 4175 } 4176 if x0_1.AuxInt != 8 { 4177 break 4178 } 4179 if w != x0_1.Args[0] { 4180 break 4181 } 4182 mem := x0.Args[2] 4183 if !(x0.Uses == 1 && clobber(x0)) { 4184 break 4185 } 4186 v.reset(OpAMD64MOVWstore) 4187 v.AuxInt = i - 1 4188 v.Aux = s 4189 v.AddArg(p) 4190 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 4191 v0.AuxInt = 8 4192 v0.AddArg(w) 4193 v.AddArg(v0) 4194 v.AddArg(mem) 4195 return true 4196 } 4197 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 4198 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 4199 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 4200 for { 4201 i := v.AuxInt 4202 s := v.Aux 4203 p := v.Args[0] 4204 w := v.Args[1] 4205 x2 := v.Args[2] 4206 if x2.Op != OpAMD64MOVBstore { 4207 break 4208 } 4209 if x2.AuxInt != i-1 { 4210 break 4211 } 4212 if x2.Aux != s { 4213 break 4214 } 4215 if p != x2.Args[0] { 4216 break 4217 } 4218 x2_1 := x2.Args[1] 4219 if x2_1.Op != OpAMD64SHRLconst { 4220 break 4221 } 4222 if x2_1.AuxInt != 8 { 4223 break 4224 } 4225 if w != x2_1.Args[0] { 4226 break 4227 } 4228 x1 := x2.Args[2] 4229 if x1.Op != OpAMD64MOVBstore { 4230 break 4231 } 4232 if x1.AuxInt != i-2 { 4233 break 4234 } 4235 if x1.Aux != s { 4236 break 4237 } 4238 if p != x1.Args[0] { 4239 break 4240 } 4241 x1_1 := x1.Args[1] 4242 if x1_1.Op != OpAMD64SHRLconst { 4243 break 4244 } 4245 if x1_1.AuxInt != 16 { 4246 break 4247 } 4248 if w != x1_1.Args[0] { 4249 break 4250 } 4251 x0 := x1.Args[2] 4252 if x0.Op != OpAMD64MOVBstore { 4253 break 4254 } 4255 if x0.AuxInt != i-3 { 4256 break 4257 } 4258 if x0.Aux != s { 4259 break 4260 } 4261 if p != x0.Args[0] { 4262 break 4263 } 4264 x0_1 := x0.Args[1] 4265 if x0_1.Op != OpAMD64SHRLconst { 4266 break 4267 } 4268 if x0_1.AuxInt != 24 { 4269 break 4270 } 4271 if w != x0_1.Args[0] { 4272 break 4273 } 4274 mem := x0.Args[2] 4275 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 4276 break 4277 } 4278 v.reset(OpAMD64MOVLstore) 4279 v.AuxInt = i - 3 4280 v.Aux = s 4281 v.AddArg(p) 4282 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 4283 v0.AddArg(w) 4284 v.AddArg(v0) 4285 v.AddArg(mem) 4286 return true 4287 } 4288 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 4289 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 4290 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 4291 for { 4292 i := v.AuxInt 4293 s := v.Aux 4294 p := v.Args[0] 4295 w := v.Args[1] 4296 x6 := v.Args[2] 4297 if x6.Op != OpAMD64MOVBstore { 4298 break 4299 } 4300 if x6.AuxInt != i-1 { 4301 break 4302 } 4303 if x6.Aux != s { 4304 break 4305 } 4306 if p != x6.Args[0] { 4307 break 4308 } 4309 x6_1 := x6.Args[1] 4310 if x6_1.Op != OpAMD64SHRQconst { 4311 break 4312 } 4313 if x6_1.AuxInt != 8 { 4314 break 4315 } 4316 if w != x6_1.Args[0] { 4317 break 4318 } 4319 x5 := x6.Args[2] 4320 if x5.Op != OpAMD64MOVBstore { 4321 break 4322 } 4323 if x5.AuxInt != i-2 { 4324 break 4325 } 4326 if x5.Aux != s { 4327 break 4328 } 4329 if p != x5.Args[0] { 4330 break 4331 } 4332 x5_1 := x5.Args[1] 4333 if x5_1.Op != OpAMD64SHRQconst { 4334 break 4335 } 4336 if x5_1.AuxInt != 16 { 4337 break 4338 } 4339 if w != x5_1.Args[0] { 4340 break 4341 } 4342 x4 := x5.Args[2] 4343 if x4.Op != OpAMD64MOVBstore { 4344 break 4345 } 4346 if x4.AuxInt != i-3 { 4347 break 4348 } 4349 if x4.Aux != s { 4350 break 4351 } 4352 if p != x4.Args[0] { 4353 break 4354 } 4355 x4_1 := x4.Args[1] 4356 if x4_1.Op != OpAMD64SHRQconst { 4357 break 4358 } 4359 if x4_1.AuxInt != 24 { 4360 break 4361 } 4362 if w != x4_1.Args[0] { 4363 break 4364 } 4365 x3 := x4.Args[2] 4366 if x3.Op != OpAMD64MOVBstore { 4367 break 4368 } 4369 if x3.AuxInt != i-4 { 4370 break 4371 } 4372 if x3.Aux != s { 4373 break 4374 } 4375 if p != x3.Args[0] { 4376 break 4377 } 4378 x3_1 := x3.Args[1] 4379 if x3_1.Op != OpAMD64SHRQconst { 4380 break 4381 } 4382 if x3_1.AuxInt != 32 { 4383 break 4384 } 4385 if w != x3_1.Args[0] { 4386 break 4387 } 4388 x2 := x3.Args[2] 4389 if x2.Op != OpAMD64MOVBstore { 4390 break 4391 } 4392 if x2.AuxInt != i-5 { 4393 break 4394 } 4395 if x2.Aux != s { 4396 break 4397 } 4398 if p != x2.Args[0] { 4399 break 4400 } 4401 x2_1 := x2.Args[1] 4402 if x2_1.Op != OpAMD64SHRQconst { 4403 break 4404 } 4405 if x2_1.AuxInt != 40 { 4406 break 4407 } 4408 if w != x2_1.Args[0] { 4409 break 4410 } 4411 x1 := x2.Args[2] 4412 if x1.Op != OpAMD64MOVBstore { 4413 break 4414 } 4415 if x1.AuxInt != i-6 { 4416 break 4417 } 4418 if x1.Aux != s { 4419 break 4420 } 4421 if p != x1.Args[0] { 4422 break 4423 } 4424 x1_1 := x1.Args[1] 4425 if x1_1.Op != OpAMD64SHRQconst { 4426 break 4427 } 4428 if x1_1.AuxInt != 48 { 4429 break 4430 } 4431 if w != x1_1.Args[0] { 4432 break 4433 } 4434 x0 := x1.Args[2] 4435 if x0.Op != OpAMD64MOVBstore { 4436 break 4437 } 4438 if x0.AuxInt != i-7 { 4439 break 4440 } 4441 if x0.Aux != s { 4442 break 4443 } 4444 if p != x0.Args[0] { 4445 break 4446 } 4447 x0_1 := x0.Args[1] 4448 if x0_1.Op != OpAMD64SHRQconst { 4449 break 4450 } 4451 if x0_1.AuxInt != 56 { 4452 break 4453 } 4454 if w != x0_1.Args[0] { 4455 break 4456 } 4457 mem := x0.Args[2] 4458 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 4459 break 4460 } 4461 v.reset(OpAMD64MOVQstore) 4462 v.AuxInt = i - 7 4463 v.Aux = s 4464 v.AddArg(p) 4465 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 4466 v0.AddArg(w) 4467 v.AddArg(v0) 4468 v.AddArg(mem) 4469 return true 4470 } 4471 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 4472 // cond: x.Uses == 1 && clobber(x) 4473 // result: (MOVWstore [i-1] {s} p w mem) 4474 for { 4475 i := v.AuxInt 4476 s := v.Aux 4477 p := v.Args[0] 4478 v_1 := v.Args[1] 4479 if v_1.Op != OpAMD64SHRQconst { 4480 break 4481 } 4482 if v_1.AuxInt != 8 { 4483 break 4484 } 4485 w := v_1.Args[0] 4486 x := v.Args[2] 4487 if x.Op != OpAMD64MOVBstore { 4488 break 4489 } 4490 if x.AuxInt != i-1 { 4491 break 4492 } 4493 if x.Aux != s { 4494 break 4495 } 4496 if p != x.Args[0] { 4497 break 4498 } 4499 if w != x.Args[1] { 4500 break 4501 } 4502 mem := x.Args[2] 4503 if !(x.Uses == 1 && clobber(x)) { 4504 break 4505 } 4506 v.reset(OpAMD64MOVWstore) 4507 v.AuxInt = i - 1 4508 v.Aux = s 4509 v.AddArg(p) 4510 v.AddArg(w) 4511 v.AddArg(mem) 4512 return true 4513 } 4514 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 4515 // cond: x.Uses == 1 && clobber(x) 4516 // result: (MOVWstore [i-1] {s} p w0 mem) 4517 for { 4518 i := v.AuxInt 4519 s := v.Aux 4520 p := v.Args[0] 4521 v_1 := v.Args[1] 4522 if v_1.Op != OpAMD64SHRQconst { 4523 break 4524 } 4525 j := v_1.AuxInt 4526 w := v_1.Args[0] 4527 x := v.Args[2] 4528 if x.Op != OpAMD64MOVBstore { 4529 break 4530 } 4531 if x.AuxInt != i-1 { 4532 break 4533 } 4534 if x.Aux != s { 4535 break 4536 } 4537 if p != x.Args[0] { 4538 break 4539 } 4540 w0 := x.Args[1] 4541 if w0.Op != OpAMD64SHRQconst { 4542 break 4543 } 4544 if w0.AuxInt != j-8 { 4545 break 4546 } 4547 if w != w0.Args[0] { 4548 break 4549 } 4550 mem := x.Args[2] 4551 if !(x.Uses == 1 && clobber(x)) { 4552 break 4553 } 4554 v.reset(OpAMD64MOVWstore) 4555 v.AuxInt = i - 1 4556 v.Aux = s 4557 v.AddArg(p) 4558 v.AddArg(w0) 4559 v.AddArg(mem) 4560 return true 4561 } 4562 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 4563 // cond: canMergeSym(sym1, sym2) 4564 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4565 for { 4566 off1 := v.AuxInt 4567 sym1 := v.Aux 4568 v_0 := v.Args[0] 4569 if v_0.Op != OpAMD64LEAL { 4570 break 4571 } 4572 off2 := v_0.AuxInt 4573 sym2 := v_0.Aux 4574 base := v_0.Args[0] 4575 val := v.Args[1] 4576 mem := v.Args[2] 4577 if !(canMergeSym(sym1, sym2)) { 4578 break 4579 } 4580 v.reset(OpAMD64MOVBstore) 4581 v.AuxInt = off1 + off2 4582 v.Aux = mergeSym(sym1, sym2) 4583 v.AddArg(base) 4584 v.AddArg(val) 4585 v.AddArg(mem) 4586 return true 4587 } 4588 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 4589 // cond: is32Bit(off1+off2) 4590 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 4591 for { 4592 off1 := v.AuxInt 4593 sym := v.Aux 4594 v_0 := v.Args[0] 4595 if v_0.Op != OpAMD64ADDLconst { 4596 break 4597 } 4598 off2 := v_0.AuxInt 4599 ptr := v_0.Args[0] 4600 val := v.Args[1] 4601 mem := v.Args[2] 4602 if !(is32Bit(off1 + off2)) { 4603 break 4604 } 4605 v.reset(OpAMD64MOVBstore) 4606 v.AuxInt = off1 + off2 4607 v.Aux = sym 4608 v.AddArg(ptr) 4609 v.AddArg(val) 4610 v.AddArg(mem) 4611 return true 4612 } 4613 return false 4614 } 4615 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { 4616 b := v.Block 4617 _ = b 4618 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 4619 // cond: ValAndOff(sc).canAdd(off) 4620 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4621 for { 4622 sc := v.AuxInt 4623 s := v.Aux 4624 v_0 := v.Args[0] 4625 if v_0.Op != OpAMD64ADDQconst { 4626 break 4627 } 4628 off := v_0.AuxInt 4629 ptr := v_0.Args[0] 4630 mem := v.Args[1] 4631 if !(ValAndOff(sc).canAdd(off)) { 4632 break 4633 } 4634 v.reset(OpAMD64MOVBstoreconst) 4635 v.AuxInt = ValAndOff(sc).add(off) 4636 v.Aux = s 4637 v.AddArg(ptr) 4638 v.AddArg(mem) 4639 return true 4640 } 4641 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 4642 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4643 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4644 for { 4645 sc := v.AuxInt 4646 sym1 := v.Aux 4647 v_0 := v.Args[0] 4648 if v_0.Op != OpAMD64LEAQ { 4649 break 4650 } 4651 off := v_0.AuxInt 4652 sym2 := v_0.Aux 4653 ptr := v_0.Args[0] 4654 mem := v.Args[1] 4655 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4656 break 4657 } 4658 v.reset(OpAMD64MOVBstoreconst) 4659 v.AuxInt = ValAndOff(sc).add(off) 4660 v.Aux = mergeSym(sym1, sym2) 4661 v.AddArg(ptr) 4662 v.AddArg(mem) 4663 return true 4664 } 4665 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 4666 // cond: canMergeSym(sym1, sym2) 4667 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 4668 for { 4669 x := v.AuxInt 4670 sym1 := v.Aux 4671 v_0 := v.Args[0] 4672 if v_0.Op != OpAMD64LEAQ1 { 4673 break 4674 } 4675 off := v_0.AuxInt 4676 sym2 := v_0.Aux 4677 ptr := v_0.Args[0] 4678 idx := v_0.Args[1] 4679 mem := v.Args[1] 4680 if !(canMergeSym(sym1, sym2)) { 4681 break 4682 } 4683 v.reset(OpAMD64MOVBstoreconstidx1) 4684 v.AuxInt = ValAndOff(x).add(off) 4685 v.Aux = mergeSym(sym1, sym2) 4686 v.AddArg(ptr) 4687 v.AddArg(idx) 4688 v.AddArg(mem) 4689 return true 4690 } 4691 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 4692 // cond: 4693 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 4694 for { 4695 x := v.AuxInt 4696 sym := v.Aux 4697 v_0 := v.Args[0] 4698 if v_0.Op != OpAMD64ADDQ { 4699 break 4700 } 4701 ptr := v_0.Args[0] 4702 idx := v_0.Args[1] 4703 mem := v.Args[1] 4704 v.reset(OpAMD64MOVBstoreconstidx1) 4705 v.AuxInt = x 4706 v.Aux = sym 4707 v.AddArg(ptr) 4708 v.AddArg(idx) 4709 v.AddArg(mem) 4710 return true 4711 } 4712 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 4713 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4714 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 4715 for { 4716 c := v.AuxInt 4717 s := v.Aux 4718 p := v.Args[0] 4719 x := v.Args[1] 4720 if x.Op != OpAMD64MOVBstoreconst { 4721 break 4722 } 4723 a := x.AuxInt 4724 if x.Aux != s { 4725 break 4726 } 4727 if p != x.Args[0] { 4728 break 4729 } 4730 mem := x.Args[1] 4731 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4732 break 4733 } 4734 v.reset(OpAMD64MOVWstoreconst) 4735 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4736 v.Aux = s 4737 v.AddArg(p) 4738 v.AddArg(mem) 4739 return true 4740 } 4741 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 4742 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 4743 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 4744 for { 4745 sc := v.AuxInt 4746 sym1 := v.Aux 4747 v_0 := v.Args[0] 4748 if v_0.Op != OpAMD64LEAL { 4749 break 4750 } 4751 off := v_0.AuxInt 4752 sym2 := v_0.Aux 4753 ptr := v_0.Args[0] 4754 mem := v.Args[1] 4755 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 4756 break 4757 } 4758 v.reset(OpAMD64MOVBstoreconst) 4759 v.AuxInt = ValAndOff(sc).add(off) 4760 v.Aux = mergeSym(sym1, sym2) 4761 v.AddArg(ptr) 4762 v.AddArg(mem) 4763 return true 4764 } 4765 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 4766 // cond: ValAndOff(sc).canAdd(off) 4767 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 4768 for { 4769 sc := v.AuxInt 4770 s := v.Aux 4771 v_0 := v.Args[0] 4772 if v_0.Op != OpAMD64ADDLconst { 4773 break 4774 } 4775 off := v_0.AuxInt 4776 ptr := v_0.Args[0] 4777 mem := v.Args[1] 4778 if !(ValAndOff(sc).canAdd(off)) { 4779 break 4780 } 4781 v.reset(OpAMD64MOVBstoreconst) 4782 v.AuxInt = ValAndOff(sc).add(off) 4783 v.Aux = s 4784 v.AddArg(ptr) 4785 v.AddArg(mem) 4786 return true 4787 } 4788 return false 4789 } 4790 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { 4791 b := v.Block 4792 _ = b 4793 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 4794 // cond: 4795 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4796 for { 4797 x := v.AuxInt 4798 sym := v.Aux 4799 v_0 := v.Args[0] 4800 if v_0.Op != OpAMD64ADDQconst { 4801 break 4802 } 4803 c := v_0.AuxInt 4804 ptr := v_0.Args[0] 4805 idx := v.Args[1] 4806 mem := v.Args[2] 4807 v.reset(OpAMD64MOVBstoreconstidx1) 4808 v.AuxInt = ValAndOff(x).add(c) 4809 v.Aux = sym 4810 v.AddArg(ptr) 4811 v.AddArg(idx) 4812 v.AddArg(mem) 4813 return true 4814 } 4815 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 4816 // cond: 4817 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 4818 for { 4819 x := v.AuxInt 4820 sym := v.Aux 4821 ptr := v.Args[0] 4822 v_1 := v.Args[1] 4823 if v_1.Op != OpAMD64ADDQconst { 4824 break 4825 } 4826 c := v_1.AuxInt 4827 idx := v_1.Args[0] 4828 mem := v.Args[2] 4829 v.reset(OpAMD64MOVBstoreconstidx1) 4830 v.AuxInt = ValAndOff(x).add(c) 4831 v.Aux = sym 4832 v.AddArg(ptr) 4833 v.AddArg(idx) 4834 v.AddArg(mem) 4835 return true 4836 } 4837 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 4838 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 4839 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 4840 for { 4841 c := v.AuxInt 4842 s := v.Aux 4843 p := v.Args[0] 4844 i := v.Args[1] 4845 x := v.Args[2] 4846 if x.Op != OpAMD64MOVBstoreconstidx1 { 4847 break 4848 } 4849 a := x.AuxInt 4850 if x.Aux != s { 4851 break 4852 } 4853 if p != x.Args[0] { 4854 break 4855 } 4856 if i != x.Args[1] { 4857 break 4858 } 4859 mem := x.Args[2] 4860 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 4861 break 4862 } 4863 v.reset(OpAMD64MOVWstoreconstidx1) 4864 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 4865 v.Aux = s 4866 v.AddArg(p) 4867 v.AddArg(i) 4868 v.AddArg(mem) 4869 return true 4870 } 4871 return false 4872 } 4873 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { 4874 b := v.Block 4875 _ = b 4876 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 4877 // cond: 4878 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4879 for { 4880 c := v.AuxInt 4881 sym := v.Aux 4882 v_0 := v.Args[0] 4883 if v_0.Op != OpAMD64ADDQconst { 4884 break 4885 } 4886 d := v_0.AuxInt 4887 ptr := v_0.Args[0] 4888 idx := v.Args[1] 4889 val := v.Args[2] 4890 mem := v.Args[3] 4891 v.reset(OpAMD64MOVBstoreidx1) 4892 v.AuxInt = c + d 4893 v.Aux = sym 4894 v.AddArg(ptr) 4895 v.AddArg(idx) 4896 v.AddArg(val) 4897 v.AddArg(mem) 4898 return true 4899 } 4900 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 4901 // cond: 4902 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 4903 for { 4904 c := v.AuxInt 4905 sym := v.Aux 4906 ptr := v.Args[0] 4907 v_1 := v.Args[1] 4908 if v_1.Op != OpAMD64ADDQconst { 4909 break 4910 } 4911 d := v_1.AuxInt 4912 idx := v_1.Args[0] 4913 val := v.Args[2] 4914 mem := v.Args[3] 4915 v.reset(OpAMD64MOVBstoreidx1) 4916 v.AuxInt = c + d 4917 v.Aux = sym 4918 v.AddArg(ptr) 4919 v.AddArg(idx) 4920 v.AddArg(val) 4921 v.AddArg(mem) 4922 return true 4923 } 4924 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) 4925 // cond: x0.Uses == 1 && clobber(x0) 4926 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem) 4927 for { 4928 i := v.AuxInt 4929 s := v.Aux 4930 p := v.Args[0] 4931 idx := v.Args[1] 4932 w := v.Args[2] 4933 x0 := v.Args[3] 4934 if x0.Op != OpAMD64MOVBstoreidx1 { 4935 break 4936 } 4937 if x0.AuxInt != i-1 { 4938 break 4939 } 4940 if x0.Aux != s { 4941 break 4942 } 4943 if p != x0.Args[0] { 4944 break 4945 } 4946 if idx != x0.Args[1] { 4947 break 4948 } 4949 x0_2 := x0.Args[2] 4950 if x0_2.Op != OpAMD64SHRWconst { 4951 break 4952 } 4953 if x0_2.AuxInt != 8 { 4954 break 4955 } 4956 if w != x0_2.Args[0] { 4957 break 4958 } 4959 mem := x0.Args[3] 4960 if !(x0.Uses == 1 && clobber(x0)) { 4961 break 4962 } 4963 v.reset(OpAMD64MOVWstoreidx1) 4964 v.AuxInt = i - 1 4965 v.Aux = s 4966 v.AddArg(p) 4967 v.AddArg(idx) 4968 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 4969 v0.AuxInt = 8 4970 v0.AddArg(w) 4971 v.AddArg(v0) 4972 v.AddArg(mem) 4973 return true 4974 } 4975 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) 4976 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 4977 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem) 4978 for { 4979 i := v.AuxInt 4980 s := v.Aux 4981 p := v.Args[0] 4982 idx := v.Args[1] 4983 w := v.Args[2] 4984 x2 := v.Args[3] 4985 if x2.Op != OpAMD64MOVBstoreidx1 { 4986 break 4987 } 4988 if x2.AuxInt != i-1 { 4989 break 4990 } 4991 if x2.Aux != s { 4992 break 4993 } 4994 if p != x2.Args[0] { 4995 break 4996 } 4997 if idx != x2.Args[1] { 4998 break 4999 } 5000 x2_2 := x2.Args[2] 5001 if x2_2.Op != OpAMD64SHRLconst { 5002 break 5003 } 5004 if x2_2.AuxInt != 8 { 5005 break 5006 } 5007 if w != x2_2.Args[0] { 5008 break 5009 } 5010 x1 := x2.Args[3] 5011 if x1.Op != OpAMD64MOVBstoreidx1 { 5012 break 5013 } 5014 if x1.AuxInt != i-2 { 5015 break 5016 } 5017 if x1.Aux != s { 5018 break 5019 } 5020 if p != x1.Args[0] { 5021 break 5022 } 5023 if idx != x1.Args[1] { 5024 break 5025 } 5026 x1_2 := x1.Args[2] 5027 if x1_2.Op != OpAMD64SHRLconst { 5028 break 5029 } 5030 if x1_2.AuxInt != 16 { 5031 break 5032 } 5033 if w != x1_2.Args[0] { 5034 break 5035 } 5036 x0 := x1.Args[3] 5037 if x0.Op != OpAMD64MOVBstoreidx1 { 5038 break 5039 } 5040 if x0.AuxInt != i-3 { 5041 break 5042 } 5043 if x0.Aux != s { 5044 break 5045 } 5046 if p != x0.Args[0] { 5047 break 5048 } 5049 if idx != x0.Args[1] { 5050 break 5051 } 5052 x0_2 := x0.Args[2] 5053 if x0_2.Op != OpAMD64SHRLconst { 5054 break 5055 } 5056 if x0_2.AuxInt != 24 { 5057 break 5058 } 5059 if w != x0_2.Args[0] { 5060 break 5061 } 5062 mem := x0.Args[3] 5063 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 5064 break 5065 } 5066 v.reset(OpAMD64MOVLstoreidx1) 5067 v.AuxInt = i - 3 5068 v.Aux = s 5069 v.AddArg(p) 5070 v.AddArg(idx) 5071 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 5072 v0.AddArg(w) 5073 v.AddArg(v0) 5074 v.AddArg(mem) 5075 return true 5076 } 5077 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) 5078 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 5079 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem) 5080 for { 5081 i := v.AuxInt 5082 s := v.Aux 5083 p := v.Args[0] 5084 idx := v.Args[1] 5085 w := v.Args[2] 5086 x6 := v.Args[3] 5087 if x6.Op != OpAMD64MOVBstoreidx1 { 5088 break 5089 } 5090 if x6.AuxInt != i-1 { 5091 break 5092 } 5093 if x6.Aux != s { 5094 break 5095 } 5096 if p != x6.Args[0] { 5097 break 5098 } 5099 if idx != x6.Args[1] { 5100 break 5101 } 5102 x6_2 := x6.Args[2] 5103 if x6_2.Op != OpAMD64SHRQconst { 5104 break 5105 } 5106 if x6_2.AuxInt != 8 { 5107 break 5108 } 5109 if w != x6_2.Args[0] { 5110 break 5111 } 5112 x5 := x6.Args[3] 5113 if x5.Op != OpAMD64MOVBstoreidx1 { 5114 break 5115 } 5116 if x5.AuxInt != i-2 { 5117 break 5118 } 5119 if x5.Aux != s { 5120 break 5121 } 5122 if p != x5.Args[0] { 5123 break 5124 } 5125 if idx != x5.Args[1] { 5126 break 5127 } 5128 x5_2 := x5.Args[2] 5129 if x5_2.Op != OpAMD64SHRQconst { 5130 break 5131 } 5132 if x5_2.AuxInt != 16 { 5133 break 5134 } 5135 if w != x5_2.Args[0] { 5136 break 5137 } 5138 x4 := x5.Args[3] 5139 if x4.Op != OpAMD64MOVBstoreidx1 { 5140 break 5141 } 5142 if x4.AuxInt != i-3 { 5143 break 5144 } 5145 if x4.Aux != s { 5146 break 5147 } 5148 if p != x4.Args[0] { 5149 break 5150 } 5151 if idx != x4.Args[1] { 5152 break 5153 } 5154 x4_2 := x4.Args[2] 5155 if x4_2.Op != OpAMD64SHRQconst { 5156 break 5157 } 5158 if x4_2.AuxInt != 24 { 5159 break 5160 } 5161 if w != x4_2.Args[0] { 5162 break 5163 } 5164 x3 := x4.Args[3] 5165 if x3.Op != OpAMD64MOVBstoreidx1 { 5166 break 5167 } 5168 if x3.AuxInt != i-4 { 5169 break 5170 } 5171 if x3.Aux != s { 5172 break 5173 } 5174 if p != x3.Args[0] { 5175 break 5176 } 5177 if idx != x3.Args[1] { 5178 break 5179 } 5180 x3_2 := x3.Args[2] 5181 if x3_2.Op != OpAMD64SHRQconst { 5182 break 5183 } 5184 if x3_2.AuxInt != 32 { 5185 break 5186 } 5187 if w != x3_2.Args[0] { 5188 break 5189 } 5190 x2 := x3.Args[3] 5191 if x2.Op != OpAMD64MOVBstoreidx1 { 5192 break 5193 } 5194 if x2.AuxInt != i-5 { 5195 break 5196 } 5197 if x2.Aux != s { 5198 break 5199 } 5200 if p != x2.Args[0] { 5201 break 5202 } 5203 if idx != x2.Args[1] { 5204 break 5205 } 5206 x2_2 := x2.Args[2] 5207 if x2_2.Op != OpAMD64SHRQconst { 5208 break 5209 } 5210 if x2_2.AuxInt != 40 { 5211 break 5212 } 5213 if w != x2_2.Args[0] { 5214 break 5215 } 5216 x1 := x2.Args[3] 5217 if x1.Op != OpAMD64MOVBstoreidx1 { 5218 break 5219 } 5220 if x1.AuxInt != i-6 { 5221 break 5222 } 5223 if x1.Aux != s { 5224 break 5225 } 5226 if p != x1.Args[0] { 5227 break 5228 } 5229 if idx != x1.Args[1] { 5230 break 5231 } 5232 x1_2 := x1.Args[2] 5233 if x1_2.Op != OpAMD64SHRQconst { 5234 break 5235 } 5236 if x1_2.AuxInt != 48 { 5237 break 5238 } 5239 if w != x1_2.Args[0] { 5240 break 5241 } 5242 x0 := x1.Args[3] 5243 if x0.Op != OpAMD64MOVBstoreidx1 { 5244 break 5245 } 5246 if x0.AuxInt != i-7 { 5247 break 5248 } 5249 if x0.Aux != s { 5250 break 5251 } 5252 if p != x0.Args[0] { 5253 break 5254 } 5255 if idx != x0.Args[1] { 5256 break 5257 } 5258 x0_2 := x0.Args[2] 5259 if x0_2.Op != OpAMD64SHRQconst { 5260 break 5261 } 5262 if x0_2.AuxInt != 56 { 5263 break 5264 } 5265 if w != x0_2.Args[0] { 5266 break 5267 } 5268 mem := x0.Args[3] 5269 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 5270 break 5271 } 5272 v.reset(OpAMD64MOVQstoreidx1) 5273 v.AuxInt = i - 7 5274 v.Aux = s 5275 v.AddArg(p) 5276 v.AddArg(idx) 5277 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 5278 v0.AddArg(w) 5279 v.AddArg(v0) 5280 v.AddArg(mem) 5281 return true 5282 } 5283 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 5284 // cond: x.Uses == 1 && clobber(x) 5285 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 5286 for { 5287 i := v.AuxInt 5288 s := v.Aux 5289 p := v.Args[0] 5290 idx := v.Args[1] 5291 v_2 := v.Args[2] 5292 if v_2.Op != OpAMD64SHRQconst { 5293 break 5294 } 5295 if v_2.AuxInt != 8 { 5296 break 5297 } 5298 w := v_2.Args[0] 5299 x := v.Args[3] 5300 if x.Op != OpAMD64MOVBstoreidx1 { 5301 break 5302 } 5303 if x.AuxInt != i-1 { 5304 break 5305 } 5306 if x.Aux != s { 5307 break 5308 } 5309 if p != x.Args[0] { 5310 break 5311 } 5312 if idx != x.Args[1] { 5313 break 5314 } 5315 if w != x.Args[2] { 5316 break 5317 } 5318 mem := x.Args[3] 5319 if !(x.Uses == 1 && clobber(x)) { 5320 break 5321 } 5322 v.reset(OpAMD64MOVWstoreidx1) 5323 v.AuxInt = i - 1 5324 v.Aux = s 5325 v.AddArg(p) 5326 v.AddArg(idx) 5327 v.AddArg(w) 5328 v.AddArg(mem) 5329 return true 5330 } 5331 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 5332 // cond: x.Uses == 1 && clobber(x) 5333 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 5334 for { 5335 i := v.AuxInt 5336 s := v.Aux 5337 p := v.Args[0] 5338 idx := v.Args[1] 5339 v_2 := v.Args[2] 5340 if v_2.Op != OpAMD64SHRQconst { 5341 break 5342 } 5343 j := v_2.AuxInt 5344 w := v_2.Args[0] 5345 x := v.Args[3] 5346 if x.Op != OpAMD64MOVBstoreidx1 { 5347 break 5348 } 5349 if x.AuxInt != i-1 { 5350 break 5351 } 5352 if x.Aux != s { 5353 break 5354 } 5355 if p != x.Args[0] { 5356 break 5357 } 5358 if idx != x.Args[1] { 5359 break 5360 } 5361 w0 := x.Args[2] 5362 if w0.Op != OpAMD64SHRQconst { 5363 break 5364 } 5365 if w0.AuxInt != j-8 { 5366 break 5367 } 5368 if w != w0.Args[0] { 5369 break 5370 } 5371 mem := x.Args[3] 5372 if !(x.Uses == 1 && clobber(x)) { 5373 break 5374 } 5375 v.reset(OpAMD64MOVWstoreidx1) 5376 v.AuxInt = i - 1 5377 v.Aux = s 5378 v.AddArg(p) 5379 v.AddArg(idx) 5380 v.AddArg(w0) 5381 v.AddArg(mem) 5382 return true 5383 } 5384 return false 5385 } 5386 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { 5387 b := v.Block 5388 _ = b 5389 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 5390 // cond: x.Uses == 1 && clobber(x) 5391 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 5392 for { 5393 x := v.Args[0] 5394 if x.Op != OpAMD64MOVLload { 5395 break 5396 } 5397 off := x.AuxInt 5398 sym := x.Aux 5399 ptr := x.Args[0] 5400 mem := x.Args[1] 5401 if !(x.Uses == 1 && clobber(x)) { 5402 break 5403 } 5404 b = x.Block 5405 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 5406 v.reset(OpCopy) 5407 v.AddArg(v0) 5408 v0.AuxInt = off 5409 v0.Aux = sym 5410 v0.AddArg(ptr) 5411 v0.AddArg(mem) 5412 return true 5413 } 5414 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 5415 // cond: x.Uses == 1 && clobber(x) 5416 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 5417 for { 5418 x := v.Args[0] 5419 if x.Op != OpAMD64MOVQload { 5420 break 5421 } 5422 off := x.AuxInt 5423 sym := x.Aux 5424 ptr := x.Args[0] 5425 mem := x.Args[1] 5426 if !(x.Uses == 1 && clobber(x)) { 5427 break 5428 } 5429 b = x.Block 5430 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 5431 v.reset(OpCopy) 5432 v.AddArg(v0) 5433 v0.AuxInt = off 5434 v0.Aux = sym 5435 v0.AddArg(ptr) 5436 v0.AddArg(mem) 5437 return true 5438 } 5439 // match: (MOVLQSX (ANDLconst [c] x)) 5440 // cond: c & 0x80000000 == 0 5441 // result: (ANDLconst [c & 0x7fffffff] x) 5442 for { 5443 v_0 := v.Args[0] 5444 if v_0.Op != OpAMD64ANDLconst { 5445 break 5446 } 5447 c := v_0.AuxInt 5448 x := v_0.Args[0] 5449 if !(c&0x80000000 == 0) { 5450 break 5451 } 5452 v.reset(OpAMD64ANDLconst) 5453 v.AuxInt = c & 0x7fffffff 5454 v.AddArg(x) 5455 return true 5456 } 5457 return false 5458 } 5459 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool { 5460 b := v.Block 5461 _ = b 5462 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 5463 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5464 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5465 for { 5466 off1 := v.AuxInt 5467 sym1 := v.Aux 5468 v_0 := v.Args[0] 5469 if v_0.Op != OpAMD64LEAQ { 5470 break 5471 } 5472 off2 := v_0.AuxInt 5473 sym2 := v_0.Aux 5474 base := v_0.Args[0] 5475 mem := v.Args[1] 5476 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5477 break 5478 } 5479 v.reset(OpAMD64MOVLQSXload) 5480 v.AuxInt = off1 + off2 5481 v.Aux = mergeSym(sym1, sym2) 5482 v.AddArg(base) 5483 v.AddArg(mem) 5484 return true 5485 } 5486 return false 5487 } 5488 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { 5489 b := v.Block 5490 _ = b 5491 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 5492 // cond: x.Uses == 1 && clobber(x) 5493 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 5494 for { 5495 x := v.Args[0] 5496 if x.Op != OpAMD64MOVLload { 5497 break 5498 } 5499 off := x.AuxInt 5500 sym := x.Aux 5501 ptr := x.Args[0] 5502 mem := x.Args[1] 5503 if !(x.Uses == 1 && clobber(x)) { 5504 break 5505 } 5506 b = x.Block 5507 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 5508 v.reset(OpCopy) 5509 v.AddArg(v0) 5510 v0.AuxInt = off 5511 v0.Aux = sym 5512 v0.AddArg(ptr) 5513 v0.AddArg(mem) 5514 return true 5515 } 5516 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 5517 // cond: x.Uses == 1 && clobber(x) 5518 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 5519 for { 5520 x := v.Args[0] 5521 if x.Op != OpAMD64MOVQload { 5522 break 5523 } 5524 off := x.AuxInt 5525 sym := x.Aux 5526 ptr := x.Args[0] 5527 mem := x.Args[1] 5528 if !(x.Uses == 1 && clobber(x)) { 5529 break 5530 } 5531 b = x.Block 5532 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 5533 v.reset(OpCopy) 5534 v.AddArg(v0) 5535 v0.AuxInt = off 5536 v0.Aux = sym 5537 v0.AddArg(ptr) 5538 v0.AddArg(mem) 5539 return true 5540 } 5541 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 5542 // cond: x.Uses == 1 && clobber(x) 5543 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 5544 for { 5545 x := v.Args[0] 5546 if x.Op != OpAMD64MOVLloadidx1 { 5547 break 5548 } 5549 off := x.AuxInt 5550 sym := x.Aux 5551 ptr := x.Args[0] 5552 idx := x.Args[1] 5553 mem := x.Args[2] 5554 if !(x.Uses == 1 && clobber(x)) { 5555 break 5556 } 5557 b = x.Block 5558 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 5559 v.reset(OpCopy) 5560 v.AddArg(v0) 5561 v0.AuxInt = off 5562 v0.Aux = sym 5563 v0.AddArg(ptr) 5564 v0.AddArg(idx) 5565 v0.AddArg(mem) 5566 return true 5567 } 5568 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 5569 // cond: x.Uses == 1 && clobber(x) 5570 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 5571 for { 5572 x := v.Args[0] 5573 if x.Op != OpAMD64MOVLloadidx4 { 5574 break 5575 } 5576 off := x.AuxInt 5577 sym := x.Aux 5578 ptr := x.Args[0] 5579 idx := x.Args[1] 5580 mem := x.Args[2] 5581 if !(x.Uses == 1 && clobber(x)) { 5582 break 5583 } 5584 b = x.Block 5585 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) 5586 v.reset(OpCopy) 5587 v.AddArg(v0) 5588 v0.AuxInt = off 5589 v0.Aux = sym 5590 v0.AddArg(ptr) 5591 v0.AddArg(idx) 5592 v0.AddArg(mem) 5593 return true 5594 } 5595 // match: (MOVLQZX (ANDLconst [c] x)) 5596 // cond: 5597 // result: (ANDLconst [c] x) 5598 for { 5599 v_0 := v.Args[0] 5600 if v_0.Op != OpAMD64ANDLconst { 5601 break 5602 } 5603 c := v_0.AuxInt 5604 x := v_0.Args[0] 5605 v.reset(OpAMD64ANDLconst) 5606 v.AuxInt = c 5607 v.AddArg(x) 5608 return true 5609 } 5610 return false 5611 } 5612 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool { 5613 b := v.Block 5614 _ = b 5615 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 5616 // cond: is32Bit(off1+off2) 5617 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 5618 for { 5619 off1 := v.AuxInt 5620 sym := v.Aux 5621 v_0 := v.Args[0] 5622 if v_0.Op != OpAMD64ADDQconst { 5623 break 5624 } 5625 off2 := v_0.AuxInt 5626 ptr := v_0.Args[0] 5627 mem := v.Args[1] 5628 if !(is32Bit(off1 + off2)) { 5629 break 5630 } 5631 v.reset(OpAMD64MOVLatomicload) 5632 v.AuxInt = off1 + off2 5633 v.Aux = sym 5634 v.AddArg(ptr) 5635 v.AddArg(mem) 5636 return true 5637 } 5638 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 5639 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5640 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 5641 for { 5642 off1 := v.AuxInt 5643 sym1 := v.Aux 5644 v_0 := v.Args[0] 5645 if v_0.Op != OpAMD64LEAQ { 5646 break 5647 } 5648 off2 := v_0.AuxInt 5649 sym2 := v_0.Aux 5650 ptr := v_0.Args[0] 5651 mem := v.Args[1] 5652 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5653 break 5654 } 5655 v.reset(OpAMD64MOVLatomicload) 5656 v.AuxInt = off1 + off2 5657 v.Aux = mergeSym(sym1, sym2) 5658 v.AddArg(ptr) 5659 v.AddArg(mem) 5660 return true 5661 } 5662 return false 5663 } 5664 func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool { 5665 b := v.Block 5666 _ = b 5667 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 5668 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 5669 // result: x 5670 for { 5671 off := v.AuxInt 5672 sym := v.Aux 5673 ptr := v.Args[0] 5674 v_1 := v.Args[1] 5675 if v_1.Op != OpAMD64MOVLstore { 5676 break 5677 } 5678 off2 := v_1.AuxInt 5679 sym2 := v_1.Aux 5680 ptr2 := v_1.Args[0] 5681 x := v_1.Args[1] 5682 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 5683 break 5684 } 5685 v.reset(OpCopy) 5686 v.Type = x.Type 5687 v.AddArg(x) 5688 return true 5689 } 5690 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 5691 // cond: is32Bit(off1+off2) 5692 // result: (MOVLload [off1+off2] {sym} ptr mem) 5693 for { 5694 off1 := v.AuxInt 5695 sym := v.Aux 5696 v_0 := v.Args[0] 5697 if v_0.Op != OpAMD64ADDQconst { 5698 break 5699 } 5700 off2 := v_0.AuxInt 5701 ptr := v_0.Args[0] 5702 mem := v.Args[1] 5703 if !(is32Bit(off1 + off2)) { 5704 break 5705 } 5706 v.reset(OpAMD64MOVLload) 5707 v.AuxInt = off1 + off2 5708 v.Aux = sym 5709 v.AddArg(ptr) 5710 v.AddArg(mem) 5711 return true 5712 } 5713 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 5714 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5715 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5716 for { 5717 off1 := v.AuxInt 5718 sym1 := v.Aux 5719 v_0 := v.Args[0] 5720 if v_0.Op != OpAMD64LEAQ { 5721 break 5722 } 5723 off2 := v_0.AuxInt 5724 sym2 := v_0.Aux 5725 base := v_0.Args[0] 5726 mem := v.Args[1] 5727 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5728 break 5729 } 5730 v.reset(OpAMD64MOVLload) 5731 v.AuxInt = off1 + off2 5732 v.Aux = mergeSym(sym1, sym2) 5733 v.AddArg(base) 5734 v.AddArg(mem) 5735 return true 5736 } 5737 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 5738 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5739 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 5740 for { 5741 off1 := v.AuxInt 5742 sym1 := v.Aux 5743 v_0 := v.Args[0] 5744 if v_0.Op != OpAMD64LEAQ1 { 5745 break 5746 } 5747 off2 := v_0.AuxInt 5748 sym2 := v_0.Aux 5749 ptr := v_0.Args[0] 5750 idx := v_0.Args[1] 5751 mem := v.Args[1] 5752 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5753 break 5754 } 5755 v.reset(OpAMD64MOVLloadidx1) 5756 v.AuxInt = off1 + off2 5757 v.Aux = mergeSym(sym1, sym2) 5758 v.AddArg(ptr) 5759 v.AddArg(idx) 5760 v.AddArg(mem) 5761 return true 5762 } 5763 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 5764 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5765 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 5766 for { 5767 off1 := v.AuxInt 5768 sym1 := v.Aux 5769 v_0 := v.Args[0] 5770 if v_0.Op != OpAMD64LEAQ4 { 5771 break 5772 } 5773 off2 := v_0.AuxInt 5774 sym2 := v_0.Aux 5775 ptr := v_0.Args[0] 5776 idx := v_0.Args[1] 5777 mem := v.Args[1] 5778 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5779 break 5780 } 5781 v.reset(OpAMD64MOVLloadidx4) 5782 v.AuxInt = off1 + off2 5783 v.Aux = mergeSym(sym1, sym2) 5784 v.AddArg(ptr) 5785 v.AddArg(idx) 5786 v.AddArg(mem) 5787 return true 5788 } 5789 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 5790 // cond: ptr.Op != OpSB 5791 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 5792 for { 5793 off := v.AuxInt 5794 sym := v.Aux 5795 v_0 := v.Args[0] 5796 if v_0.Op != OpAMD64ADDQ { 5797 break 5798 } 5799 ptr := v_0.Args[0] 5800 idx := v_0.Args[1] 5801 mem := v.Args[1] 5802 if !(ptr.Op != OpSB) { 5803 break 5804 } 5805 v.reset(OpAMD64MOVLloadidx1) 5806 v.AuxInt = off 5807 v.Aux = sym 5808 v.AddArg(ptr) 5809 v.AddArg(idx) 5810 v.AddArg(mem) 5811 return true 5812 } 5813 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 5814 // cond: canMergeSym(sym1, sym2) 5815 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5816 for { 5817 off1 := v.AuxInt 5818 sym1 := v.Aux 5819 v_0 := v.Args[0] 5820 if v_0.Op != OpAMD64LEAL { 5821 break 5822 } 5823 off2 := v_0.AuxInt 5824 sym2 := v_0.Aux 5825 base := v_0.Args[0] 5826 mem := v.Args[1] 5827 if !(canMergeSym(sym1, sym2)) { 5828 break 5829 } 5830 v.reset(OpAMD64MOVLload) 5831 v.AuxInt = off1 + off2 5832 v.Aux = mergeSym(sym1, sym2) 5833 v.AddArg(base) 5834 v.AddArg(mem) 5835 return true 5836 } 5837 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 5838 // cond: is32Bit(off1+off2) 5839 // result: (MOVLload [off1+off2] {sym} ptr mem) 5840 for { 5841 off1 := v.AuxInt 5842 sym := v.Aux 5843 v_0 := v.Args[0] 5844 if v_0.Op != OpAMD64ADDLconst { 5845 break 5846 } 5847 off2 := v_0.AuxInt 5848 ptr := v_0.Args[0] 5849 mem := v.Args[1] 5850 if !(is32Bit(off1 + off2)) { 5851 break 5852 } 5853 v.reset(OpAMD64MOVLload) 5854 v.AuxInt = off1 + off2 5855 v.Aux = sym 5856 v.AddArg(ptr) 5857 v.AddArg(mem) 5858 return true 5859 } 5860 return false 5861 } 5862 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool { 5863 b := v.Block 5864 _ = b 5865 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 5866 // cond: 5867 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 5868 for { 5869 c := v.AuxInt 5870 sym := v.Aux 5871 ptr := v.Args[0] 5872 v_1 := v.Args[1] 5873 if v_1.Op != OpAMD64SHLQconst { 5874 break 5875 } 5876 if v_1.AuxInt != 2 { 5877 break 5878 } 5879 idx := v_1.Args[0] 5880 mem := v.Args[2] 5881 v.reset(OpAMD64MOVLloadidx4) 5882 v.AuxInt = c 5883 v.Aux = sym 5884 v.AddArg(ptr) 5885 v.AddArg(idx) 5886 v.AddArg(mem) 5887 return true 5888 } 5889 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 5890 // cond: 5891 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 5892 for { 5893 c := v.AuxInt 5894 sym := v.Aux 5895 v_0 := v.Args[0] 5896 if v_0.Op != OpAMD64ADDQconst { 5897 break 5898 } 5899 d := v_0.AuxInt 5900 ptr := v_0.Args[0] 5901 idx := v.Args[1] 5902 mem := v.Args[2] 5903 v.reset(OpAMD64MOVLloadidx1) 5904 v.AuxInt = c + d 5905 v.Aux = sym 5906 v.AddArg(ptr) 5907 v.AddArg(idx) 5908 v.AddArg(mem) 5909 return true 5910 } 5911 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 5912 // cond: 5913 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 5914 for { 5915 c := v.AuxInt 5916 sym := v.Aux 5917 ptr := v.Args[0] 5918 v_1 := v.Args[1] 5919 if v_1.Op != OpAMD64ADDQconst { 5920 break 5921 } 5922 d := v_1.AuxInt 5923 idx := v_1.Args[0] 5924 mem := v.Args[2] 5925 v.reset(OpAMD64MOVLloadidx1) 5926 v.AuxInt = c + d 5927 v.Aux = sym 5928 v.AddArg(ptr) 5929 v.AddArg(idx) 5930 v.AddArg(mem) 5931 return true 5932 } 5933 return false 5934 } 5935 func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool { 5936 b := v.Block 5937 _ = b 5938 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 5939 // cond: 5940 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 5941 for { 5942 c := v.AuxInt 5943 sym := v.Aux 5944 v_0 := v.Args[0] 5945 if v_0.Op != OpAMD64ADDQconst { 5946 break 5947 } 5948 d := v_0.AuxInt 5949 ptr := v_0.Args[0] 5950 idx := v.Args[1] 5951 mem := v.Args[2] 5952 v.reset(OpAMD64MOVLloadidx4) 5953 v.AuxInt = c + d 5954 v.Aux = sym 5955 v.AddArg(ptr) 5956 v.AddArg(idx) 5957 v.AddArg(mem) 5958 return true 5959 } 5960 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 5961 // cond: 5962 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 5963 for { 5964 c := v.AuxInt 5965 sym := v.Aux 5966 ptr := v.Args[0] 5967 v_1 := v.Args[1] 5968 if v_1.Op != OpAMD64ADDQconst { 5969 break 5970 } 5971 d := v_1.AuxInt 5972 idx := v_1.Args[0] 5973 mem := v.Args[2] 5974 v.reset(OpAMD64MOVLloadidx4) 5975 v.AuxInt = c + 4*d 5976 v.Aux = sym 5977 v.AddArg(ptr) 5978 v.AddArg(idx) 5979 v.AddArg(mem) 5980 return true 5981 } 5982 return false 5983 } 5984 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool { 5985 b := v.Block 5986 _ = b 5987 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 5988 // cond: 5989 // result: (MOVLstore [off] {sym} ptr x mem) 5990 for { 5991 off := v.AuxInt 5992 sym := v.Aux 5993 ptr := v.Args[0] 5994 v_1 := v.Args[1] 5995 if v_1.Op != OpAMD64MOVLQSX { 5996 break 5997 } 5998 x := v_1.Args[0] 5999 mem := v.Args[2] 6000 v.reset(OpAMD64MOVLstore) 6001 v.AuxInt = off 6002 v.Aux = sym 6003 v.AddArg(ptr) 6004 v.AddArg(x) 6005 v.AddArg(mem) 6006 return true 6007 } 6008 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 6009 // cond: 6010 // result: (MOVLstore [off] {sym} ptr x mem) 6011 for { 6012 off := v.AuxInt 6013 sym := v.Aux 6014 ptr := v.Args[0] 6015 v_1 := v.Args[1] 6016 if v_1.Op != OpAMD64MOVLQZX { 6017 break 6018 } 6019 x := v_1.Args[0] 6020 mem := v.Args[2] 6021 v.reset(OpAMD64MOVLstore) 6022 v.AuxInt = off 6023 v.Aux = sym 6024 v.AddArg(ptr) 6025 v.AddArg(x) 6026 v.AddArg(mem) 6027 return true 6028 } 6029 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 6030 // cond: is32Bit(off1+off2) 6031 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 6032 for { 6033 off1 := v.AuxInt 6034 sym := v.Aux 6035 v_0 := v.Args[0] 6036 if v_0.Op != OpAMD64ADDQconst { 6037 break 6038 } 6039 off2 := v_0.AuxInt 6040 ptr := v_0.Args[0] 6041 val := v.Args[1] 6042 mem := v.Args[2] 6043 if !(is32Bit(off1 + off2)) { 6044 break 6045 } 6046 v.reset(OpAMD64MOVLstore) 6047 v.AuxInt = off1 + off2 6048 v.Aux = sym 6049 v.AddArg(ptr) 6050 v.AddArg(val) 6051 v.AddArg(mem) 6052 return true 6053 } 6054 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 6055 // cond: validOff(off) 6056 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 6057 for { 6058 off := v.AuxInt 6059 sym := v.Aux 6060 ptr := v.Args[0] 6061 v_1 := v.Args[1] 6062 if v_1.Op != OpAMD64MOVLconst { 6063 break 6064 } 6065 c := v_1.AuxInt 6066 mem := v.Args[2] 6067 if !(validOff(off)) { 6068 break 6069 } 6070 v.reset(OpAMD64MOVLstoreconst) 6071 v.AuxInt = makeValAndOff(int64(int32(c)), off) 6072 v.Aux = sym 6073 v.AddArg(ptr) 6074 v.AddArg(mem) 6075 return true 6076 } 6077 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6078 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6079 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6080 for { 6081 off1 := v.AuxInt 6082 sym1 := v.Aux 6083 v_0 := v.Args[0] 6084 if v_0.Op != OpAMD64LEAQ { 6085 break 6086 } 6087 off2 := v_0.AuxInt 6088 sym2 := v_0.Aux 6089 base := v_0.Args[0] 6090 val := v.Args[1] 6091 mem := v.Args[2] 6092 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6093 break 6094 } 6095 v.reset(OpAMD64MOVLstore) 6096 v.AuxInt = off1 + off2 6097 v.Aux = mergeSym(sym1, sym2) 6098 v.AddArg(base) 6099 v.AddArg(val) 6100 v.AddArg(mem) 6101 return true 6102 } 6103 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 6104 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6105 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 6106 for { 6107 off1 := v.AuxInt 6108 sym1 := v.Aux 6109 v_0 := v.Args[0] 6110 if v_0.Op != OpAMD64LEAQ1 { 6111 break 6112 } 6113 off2 := v_0.AuxInt 6114 sym2 := v_0.Aux 6115 ptr := v_0.Args[0] 6116 idx := v_0.Args[1] 6117 val := v.Args[1] 6118 mem := v.Args[2] 6119 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6120 break 6121 } 6122 v.reset(OpAMD64MOVLstoreidx1) 6123 v.AuxInt = off1 + off2 6124 v.Aux = mergeSym(sym1, sym2) 6125 v.AddArg(ptr) 6126 v.AddArg(idx) 6127 v.AddArg(val) 6128 v.AddArg(mem) 6129 return true 6130 } 6131 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 6132 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 6133 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 6134 for { 6135 off1 := v.AuxInt 6136 sym1 := v.Aux 6137 v_0 := v.Args[0] 6138 if v_0.Op != OpAMD64LEAQ4 { 6139 break 6140 } 6141 off2 := v_0.AuxInt 6142 sym2 := v_0.Aux 6143 ptr := v_0.Args[0] 6144 idx := v_0.Args[1] 6145 val := v.Args[1] 6146 mem := v.Args[2] 6147 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 6148 break 6149 } 6150 v.reset(OpAMD64MOVLstoreidx4) 6151 v.AuxInt = off1 + off2 6152 v.Aux = mergeSym(sym1, sym2) 6153 v.AddArg(ptr) 6154 v.AddArg(idx) 6155 v.AddArg(val) 6156 v.AddArg(mem) 6157 return true 6158 } 6159 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 6160 // cond: ptr.Op != OpSB 6161 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 6162 for { 6163 off := v.AuxInt 6164 sym := v.Aux 6165 v_0 := v.Args[0] 6166 if v_0.Op != OpAMD64ADDQ { 6167 break 6168 } 6169 ptr := v_0.Args[0] 6170 idx := v_0.Args[1] 6171 val := v.Args[1] 6172 mem := v.Args[2] 6173 if !(ptr.Op != OpSB) { 6174 break 6175 } 6176 v.reset(OpAMD64MOVLstoreidx1) 6177 v.AuxInt = off 6178 v.Aux = sym 6179 v.AddArg(ptr) 6180 v.AddArg(idx) 6181 v.AddArg(val) 6182 v.AddArg(mem) 6183 return true 6184 } 6185 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 6186 // cond: x.Uses == 1 && clobber(x) 6187 // result: (MOVQstore [i-4] {s} p w mem) 6188 for { 6189 i := v.AuxInt 6190 s := v.Aux 6191 p := v.Args[0] 6192 v_1 := v.Args[1] 6193 if v_1.Op != OpAMD64SHRQconst { 6194 break 6195 } 6196 if v_1.AuxInt != 32 { 6197 break 6198 } 6199 w := v_1.Args[0] 6200 x := v.Args[2] 6201 if x.Op != OpAMD64MOVLstore { 6202 break 6203 } 6204 if x.AuxInt != i-4 { 6205 break 6206 } 6207 if x.Aux != s { 6208 break 6209 } 6210 if p != x.Args[0] { 6211 break 6212 } 6213 if w != x.Args[1] { 6214 break 6215 } 6216 mem := x.Args[2] 6217 if !(x.Uses == 1 && clobber(x)) { 6218 break 6219 } 6220 v.reset(OpAMD64MOVQstore) 6221 v.AuxInt = i - 4 6222 v.Aux = s 6223 v.AddArg(p) 6224 v.AddArg(w) 6225 v.AddArg(mem) 6226 return true 6227 } 6228 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 6229 // cond: x.Uses == 1 && clobber(x) 6230 // result: (MOVQstore [i-4] {s} p w0 mem) 6231 for { 6232 i := v.AuxInt 6233 s := v.Aux 6234 p := v.Args[0] 6235 v_1 := v.Args[1] 6236 if v_1.Op != OpAMD64SHRQconst { 6237 break 6238 } 6239 j := v_1.AuxInt 6240 w := v_1.Args[0] 6241 x := v.Args[2] 6242 if x.Op != OpAMD64MOVLstore { 6243 break 6244 } 6245 if x.AuxInt != i-4 { 6246 break 6247 } 6248 if x.Aux != s { 6249 break 6250 } 6251 if p != x.Args[0] { 6252 break 6253 } 6254 w0 := x.Args[1] 6255 if w0.Op != OpAMD64SHRQconst { 6256 break 6257 } 6258 if w0.AuxInt != j-32 { 6259 break 6260 } 6261 if w != w0.Args[0] { 6262 break 6263 } 6264 mem := x.Args[2] 6265 if !(x.Uses == 1 && clobber(x)) { 6266 break 6267 } 6268 v.reset(OpAMD64MOVQstore) 6269 v.AuxInt = i - 4 6270 v.Aux = s 6271 v.AddArg(p) 6272 v.AddArg(w0) 6273 v.AddArg(mem) 6274 return true 6275 } 6276 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 6277 // cond: canMergeSym(sym1, sym2) 6278 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6279 for { 6280 off1 := v.AuxInt 6281 sym1 := v.Aux 6282 v_0 := v.Args[0] 6283 if v_0.Op != OpAMD64LEAL { 6284 break 6285 } 6286 off2 := v_0.AuxInt 6287 sym2 := v_0.Aux 6288 base := v_0.Args[0] 6289 val := v.Args[1] 6290 mem := v.Args[2] 6291 if !(canMergeSym(sym1, sym2)) { 6292 break 6293 } 6294 v.reset(OpAMD64MOVLstore) 6295 v.AuxInt = off1 + off2 6296 v.Aux = mergeSym(sym1, sym2) 6297 v.AddArg(base) 6298 v.AddArg(val) 6299 v.AddArg(mem) 6300 return true 6301 } 6302 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 6303 // cond: is32Bit(off1+off2) 6304 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 6305 for { 6306 off1 := v.AuxInt 6307 sym := v.Aux 6308 v_0 := v.Args[0] 6309 if v_0.Op != OpAMD64ADDLconst { 6310 break 6311 } 6312 off2 := v_0.AuxInt 6313 ptr := v_0.Args[0] 6314 val := v.Args[1] 6315 mem := v.Args[2] 6316 if !(is32Bit(off1 + off2)) { 6317 break 6318 } 6319 v.reset(OpAMD64MOVLstore) 6320 v.AuxInt = off1 + off2 6321 v.Aux = sym 6322 v.AddArg(ptr) 6323 v.AddArg(val) 6324 v.AddArg(mem) 6325 return true 6326 } 6327 return false 6328 } 6329 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { 6330 b := v.Block 6331 _ = b 6332 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 6333 // cond: ValAndOff(sc).canAdd(off) 6334 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6335 for { 6336 sc := v.AuxInt 6337 s := v.Aux 6338 v_0 := v.Args[0] 6339 if v_0.Op != OpAMD64ADDQconst { 6340 break 6341 } 6342 off := v_0.AuxInt 6343 ptr := v_0.Args[0] 6344 mem := v.Args[1] 6345 if !(ValAndOff(sc).canAdd(off)) { 6346 break 6347 } 6348 v.reset(OpAMD64MOVLstoreconst) 6349 v.AuxInt = ValAndOff(sc).add(off) 6350 v.Aux = s 6351 v.AddArg(ptr) 6352 v.AddArg(mem) 6353 return true 6354 } 6355 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 6356 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6357 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6358 for { 6359 sc := v.AuxInt 6360 sym1 := v.Aux 6361 v_0 := v.Args[0] 6362 if v_0.Op != OpAMD64LEAQ { 6363 break 6364 } 6365 off := v_0.AuxInt 6366 sym2 := v_0.Aux 6367 ptr := v_0.Args[0] 6368 mem := v.Args[1] 6369 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6370 break 6371 } 6372 v.reset(OpAMD64MOVLstoreconst) 6373 v.AuxInt = ValAndOff(sc).add(off) 6374 v.Aux = mergeSym(sym1, sym2) 6375 v.AddArg(ptr) 6376 v.AddArg(mem) 6377 return true 6378 } 6379 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 6380 // cond: canMergeSym(sym1, sym2) 6381 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6382 for { 6383 x := v.AuxInt 6384 sym1 := v.Aux 6385 v_0 := v.Args[0] 6386 if v_0.Op != OpAMD64LEAQ1 { 6387 break 6388 } 6389 off := v_0.AuxInt 6390 sym2 := v_0.Aux 6391 ptr := v_0.Args[0] 6392 idx := v_0.Args[1] 6393 mem := v.Args[1] 6394 if !(canMergeSym(sym1, sym2)) { 6395 break 6396 } 6397 v.reset(OpAMD64MOVLstoreconstidx1) 6398 v.AuxInt = ValAndOff(x).add(off) 6399 v.Aux = mergeSym(sym1, sym2) 6400 v.AddArg(ptr) 6401 v.AddArg(idx) 6402 v.AddArg(mem) 6403 return true 6404 } 6405 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 6406 // cond: canMergeSym(sym1, sym2) 6407 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6408 for { 6409 x := v.AuxInt 6410 sym1 := v.Aux 6411 v_0 := v.Args[0] 6412 if v_0.Op != OpAMD64LEAQ4 { 6413 break 6414 } 6415 off := v_0.AuxInt 6416 sym2 := v_0.Aux 6417 ptr := v_0.Args[0] 6418 idx := v_0.Args[1] 6419 mem := v.Args[1] 6420 if !(canMergeSym(sym1, sym2)) { 6421 break 6422 } 6423 v.reset(OpAMD64MOVLstoreconstidx4) 6424 v.AuxInt = ValAndOff(x).add(off) 6425 v.Aux = mergeSym(sym1, sym2) 6426 v.AddArg(ptr) 6427 v.AddArg(idx) 6428 v.AddArg(mem) 6429 return true 6430 } 6431 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 6432 // cond: 6433 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 6434 for { 6435 x := v.AuxInt 6436 sym := v.Aux 6437 v_0 := v.Args[0] 6438 if v_0.Op != OpAMD64ADDQ { 6439 break 6440 } 6441 ptr := v_0.Args[0] 6442 idx := v_0.Args[1] 6443 mem := v.Args[1] 6444 v.reset(OpAMD64MOVLstoreconstidx1) 6445 v.AuxInt = x 6446 v.Aux = sym 6447 v.AddArg(ptr) 6448 v.AddArg(idx) 6449 v.AddArg(mem) 6450 return true 6451 } 6452 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 6453 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 6454 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 6455 for { 6456 c := v.AuxInt 6457 s := v.Aux 6458 p := v.Args[0] 6459 x := v.Args[1] 6460 if x.Op != OpAMD64MOVLstoreconst { 6461 break 6462 } 6463 a := x.AuxInt 6464 if x.Aux != s { 6465 break 6466 } 6467 if p != x.Args[0] { 6468 break 6469 } 6470 mem := x.Args[1] 6471 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 6472 break 6473 } 6474 v.reset(OpAMD64MOVQstore) 6475 v.AuxInt = ValAndOff(a).Off() 6476 v.Aux = s 6477 v.AddArg(p) 6478 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 6479 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 6480 v.AddArg(v0) 6481 v.AddArg(mem) 6482 return true 6483 } 6484 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 6485 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6486 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6487 for { 6488 sc := v.AuxInt 6489 sym1 := v.Aux 6490 v_0 := v.Args[0] 6491 if v_0.Op != OpAMD64LEAL { 6492 break 6493 } 6494 off := v_0.AuxInt 6495 sym2 := v_0.Aux 6496 ptr := v_0.Args[0] 6497 mem := v.Args[1] 6498 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6499 break 6500 } 6501 v.reset(OpAMD64MOVLstoreconst) 6502 v.AuxInt = ValAndOff(sc).add(off) 6503 v.Aux = mergeSym(sym1, sym2) 6504 v.AddArg(ptr) 6505 v.AddArg(mem) 6506 return true 6507 } 6508 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 6509 // cond: ValAndOff(sc).canAdd(off) 6510 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6511 for { 6512 sc := v.AuxInt 6513 s := v.Aux 6514 v_0 := v.Args[0] 6515 if v_0.Op != OpAMD64ADDLconst { 6516 break 6517 } 6518 off := v_0.AuxInt 6519 ptr := v_0.Args[0] 6520 mem := v.Args[1] 6521 if !(ValAndOff(sc).canAdd(off)) { 6522 break 6523 } 6524 v.reset(OpAMD64MOVLstoreconst) 6525 v.AuxInt = ValAndOff(sc).add(off) 6526 v.Aux = s 6527 v.AddArg(ptr) 6528 v.AddArg(mem) 6529 return true 6530 } 6531 return false 6532 } 6533 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool { 6534 b := v.Block 6535 _ = b 6536 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 6537 // cond: 6538 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 6539 for { 6540 c := v.AuxInt 6541 sym := v.Aux 6542 ptr := v.Args[0] 6543 v_1 := v.Args[1] 6544 if v_1.Op != OpAMD64SHLQconst { 6545 break 6546 } 6547 if v_1.AuxInt != 2 { 6548 break 6549 } 6550 idx := v_1.Args[0] 6551 mem := v.Args[2] 6552 v.reset(OpAMD64MOVLstoreconstidx4) 6553 v.AuxInt = c 6554 v.Aux = sym 6555 v.AddArg(ptr) 6556 v.AddArg(idx) 6557 v.AddArg(mem) 6558 return true 6559 } 6560 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 6561 // cond: 6562 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6563 for { 6564 x := v.AuxInt 6565 sym := v.Aux 6566 v_0 := v.Args[0] 6567 if v_0.Op != OpAMD64ADDQconst { 6568 break 6569 } 6570 c := v_0.AuxInt 6571 ptr := v_0.Args[0] 6572 idx := v.Args[1] 6573 mem := v.Args[2] 6574 v.reset(OpAMD64MOVLstoreconstidx1) 6575 v.AuxInt = ValAndOff(x).add(c) 6576 v.Aux = sym 6577 v.AddArg(ptr) 6578 v.AddArg(idx) 6579 v.AddArg(mem) 6580 return true 6581 } 6582 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 6583 // cond: 6584 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6585 for { 6586 x := v.AuxInt 6587 sym := v.Aux 6588 ptr := v.Args[0] 6589 v_1 := v.Args[1] 6590 if v_1.Op != OpAMD64ADDQconst { 6591 break 6592 } 6593 c := v_1.AuxInt 6594 idx := v_1.Args[0] 6595 mem := v.Args[2] 6596 v.reset(OpAMD64MOVLstoreconstidx1) 6597 v.AuxInt = ValAndOff(x).add(c) 6598 v.Aux = sym 6599 v.AddArg(ptr) 6600 v.AddArg(idx) 6601 v.AddArg(mem) 6602 return true 6603 } 6604 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) 6605 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 6606 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 6607 for { 6608 c := v.AuxInt 6609 s := v.Aux 6610 p := v.Args[0] 6611 i := v.Args[1] 6612 x := v.Args[2] 6613 if x.Op != OpAMD64MOVLstoreconstidx1 { 6614 break 6615 } 6616 a := x.AuxInt 6617 if x.Aux != s { 6618 break 6619 } 6620 if p != x.Args[0] { 6621 break 6622 } 6623 if i != x.Args[1] { 6624 break 6625 } 6626 mem := x.Args[2] 6627 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 6628 break 6629 } 6630 v.reset(OpAMD64MOVQstoreidx1) 6631 v.AuxInt = ValAndOff(a).Off() 6632 v.Aux = s 6633 v.AddArg(p) 6634 v.AddArg(i) 6635 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 6636 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 6637 v.AddArg(v0) 6638 v.AddArg(mem) 6639 return true 6640 } 6641 return false 6642 } 6643 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool { 6644 b := v.Block 6645 _ = b 6646 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 6647 // cond: 6648 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6649 for { 6650 x := v.AuxInt 6651 sym := v.Aux 6652 v_0 := v.Args[0] 6653 if v_0.Op != OpAMD64ADDQconst { 6654 break 6655 } 6656 c := v_0.AuxInt 6657 ptr := v_0.Args[0] 6658 idx := v.Args[1] 6659 mem := v.Args[2] 6660 v.reset(OpAMD64MOVLstoreconstidx4) 6661 v.AuxInt = ValAndOff(x).add(c) 6662 v.Aux = sym 6663 v.AddArg(ptr) 6664 v.AddArg(idx) 6665 v.AddArg(mem) 6666 return true 6667 } 6668 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 6669 // cond: 6670 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 6671 for { 6672 x := v.AuxInt 6673 sym := v.Aux 6674 ptr := v.Args[0] 6675 v_1 := v.Args[1] 6676 if v_1.Op != OpAMD64ADDQconst { 6677 break 6678 } 6679 c := v_1.AuxInt 6680 idx := v_1.Args[0] 6681 mem := v.Args[2] 6682 v.reset(OpAMD64MOVLstoreconstidx4) 6683 v.AuxInt = ValAndOff(x).add(4 * c) 6684 v.Aux = sym 6685 v.AddArg(ptr) 6686 v.AddArg(idx) 6687 v.AddArg(mem) 6688 return true 6689 } 6690 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 6691 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 6692 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 6693 for { 6694 c := v.AuxInt 6695 s := v.Aux 6696 p := v.Args[0] 6697 i := v.Args[1] 6698 x := v.Args[2] 6699 if x.Op != OpAMD64MOVLstoreconstidx4 { 6700 break 6701 } 6702 a := x.AuxInt 6703 if x.Aux != s { 6704 break 6705 } 6706 if p != x.Args[0] { 6707 break 6708 } 6709 if i != x.Args[1] { 6710 break 6711 } 6712 mem := x.Args[2] 6713 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 6714 break 6715 } 6716 v.reset(OpAMD64MOVQstoreidx1) 6717 v.AuxInt = ValAndOff(a).Off() 6718 v.Aux = s 6719 v.AddArg(p) 6720 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 6721 v0.AuxInt = 2 6722 v0.AddArg(i) 6723 v.AddArg(v0) 6724 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 6725 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 6726 v.AddArg(v1) 6727 v.AddArg(mem) 6728 return true 6729 } 6730 return false 6731 } 6732 func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool { 6733 b := v.Block 6734 _ = b 6735 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 6736 // cond: 6737 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 6738 for { 6739 c := v.AuxInt 6740 sym := v.Aux 6741 ptr := v.Args[0] 6742 v_1 := v.Args[1] 6743 if v_1.Op != OpAMD64SHLQconst { 6744 break 6745 } 6746 if v_1.AuxInt != 2 { 6747 break 6748 } 6749 idx := v_1.Args[0] 6750 val := v.Args[2] 6751 mem := v.Args[3] 6752 v.reset(OpAMD64MOVLstoreidx4) 6753 v.AuxInt = c 6754 v.Aux = sym 6755 v.AddArg(ptr) 6756 v.AddArg(idx) 6757 v.AddArg(val) 6758 v.AddArg(mem) 6759 return true 6760 } 6761 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6762 // cond: 6763 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 6764 for { 6765 c := v.AuxInt 6766 sym := v.Aux 6767 v_0 := v.Args[0] 6768 if v_0.Op != OpAMD64ADDQconst { 6769 break 6770 } 6771 d := v_0.AuxInt 6772 ptr := v_0.Args[0] 6773 idx := v.Args[1] 6774 val := v.Args[2] 6775 mem := v.Args[3] 6776 v.reset(OpAMD64MOVLstoreidx1) 6777 v.AuxInt = c + d 6778 v.Aux = sym 6779 v.AddArg(ptr) 6780 v.AddArg(idx) 6781 v.AddArg(val) 6782 v.AddArg(mem) 6783 return true 6784 } 6785 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6786 // cond: 6787 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 6788 for { 6789 c := v.AuxInt 6790 sym := v.Aux 6791 ptr := v.Args[0] 6792 v_1 := v.Args[1] 6793 if v_1.Op != OpAMD64ADDQconst { 6794 break 6795 } 6796 d := v_1.AuxInt 6797 idx := v_1.Args[0] 6798 val := v.Args[2] 6799 mem := v.Args[3] 6800 v.reset(OpAMD64MOVLstoreidx1) 6801 v.AuxInt = c + d 6802 v.Aux = sym 6803 v.AddArg(ptr) 6804 v.AddArg(idx) 6805 v.AddArg(val) 6806 v.AddArg(mem) 6807 return true 6808 } 6809 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 6810 // cond: x.Uses == 1 && clobber(x) 6811 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 6812 for { 6813 i := v.AuxInt 6814 s := v.Aux 6815 p := v.Args[0] 6816 idx := v.Args[1] 6817 v_2 := v.Args[2] 6818 if v_2.Op != OpAMD64SHRQconst { 6819 break 6820 } 6821 if v_2.AuxInt != 32 { 6822 break 6823 } 6824 w := v_2.Args[0] 6825 x := v.Args[3] 6826 if x.Op != OpAMD64MOVLstoreidx1 { 6827 break 6828 } 6829 if x.AuxInt != i-4 { 6830 break 6831 } 6832 if x.Aux != s { 6833 break 6834 } 6835 if p != x.Args[0] { 6836 break 6837 } 6838 if idx != x.Args[1] { 6839 break 6840 } 6841 if w != x.Args[2] { 6842 break 6843 } 6844 mem := x.Args[3] 6845 if !(x.Uses == 1 && clobber(x)) { 6846 break 6847 } 6848 v.reset(OpAMD64MOVQstoreidx1) 6849 v.AuxInt = i - 4 6850 v.Aux = s 6851 v.AddArg(p) 6852 v.AddArg(idx) 6853 v.AddArg(w) 6854 v.AddArg(mem) 6855 return true 6856 } 6857 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 6858 // cond: x.Uses == 1 && clobber(x) 6859 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 6860 for { 6861 i := v.AuxInt 6862 s := v.Aux 6863 p := v.Args[0] 6864 idx := v.Args[1] 6865 v_2 := v.Args[2] 6866 if v_2.Op != OpAMD64SHRQconst { 6867 break 6868 } 6869 j := v_2.AuxInt 6870 w := v_2.Args[0] 6871 x := v.Args[3] 6872 if x.Op != OpAMD64MOVLstoreidx1 { 6873 break 6874 } 6875 if x.AuxInt != i-4 { 6876 break 6877 } 6878 if x.Aux != s { 6879 break 6880 } 6881 if p != x.Args[0] { 6882 break 6883 } 6884 if idx != x.Args[1] { 6885 break 6886 } 6887 w0 := x.Args[2] 6888 if w0.Op != OpAMD64SHRQconst { 6889 break 6890 } 6891 if w0.AuxInt != j-32 { 6892 break 6893 } 6894 if w != w0.Args[0] { 6895 break 6896 } 6897 mem := x.Args[3] 6898 if !(x.Uses == 1 && clobber(x)) { 6899 break 6900 } 6901 v.reset(OpAMD64MOVQstoreidx1) 6902 v.AuxInt = i - 4 6903 v.Aux = s 6904 v.AddArg(p) 6905 v.AddArg(idx) 6906 v.AddArg(w0) 6907 v.AddArg(mem) 6908 return true 6909 } 6910 return false 6911 } 6912 func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { 6913 b := v.Block 6914 _ = b 6915 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6916 // cond: 6917 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 6918 for { 6919 c := v.AuxInt 6920 sym := v.Aux 6921 v_0 := v.Args[0] 6922 if v_0.Op != OpAMD64ADDQconst { 6923 break 6924 } 6925 d := v_0.AuxInt 6926 ptr := v_0.Args[0] 6927 idx := v.Args[1] 6928 val := v.Args[2] 6929 mem := v.Args[3] 6930 v.reset(OpAMD64MOVLstoreidx4) 6931 v.AuxInt = c + d 6932 v.Aux = sym 6933 v.AddArg(ptr) 6934 v.AddArg(idx) 6935 v.AddArg(val) 6936 v.AddArg(mem) 6937 return true 6938 } 6939 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6940 // cond: 6941 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 6942 for { 6943 c := v.AuxInt 6944 sym := v.Aux 6945 ptr := v.Args[0] 6946 v_1 := v.Args[1] 6947 if v_1.Op != OpAMD64ADDQconst { 6948 break 6949 } 6950 d := v_1.AuxInt 6951 idx := v_1.Args[0] 6952 val := v.Args[2] 6953 mem := v.Args[3] 6954 v.reset(OpAMD64MOVLstoreidx4) 6955 v.AuxInt = c + 4*d 6956 v.Aux = sym 6957 v.AddArg(ptr) 6958 v.AddArg(idx) 6959 v.AddArg(val) 6960 v.AddArg(mem) 6961 return true 6962 } 6963 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 6964 // cond: x.Uses == 1 && clobber(x) 6965 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 6966 for { 6967 i := v.AuxInt 6968 s := v.Aux 6969 p := v.Args[0] 6970 idx := v.Args[1] 6971 v_2 := v.Args[2] 6972 if v_2.Op != OpAMD64SHRQconst { 6973 break 6974 } 6975 if v_2.AuxInt != 32 { 6976 break 6977 } 6978 w := v_2.Args[0] 6979 x := v.Args[3] 6980 if x.Op != OpAMD64MOVLstoreidx4 { 6981 break 6982 } 6983 if x.AuxInt != i-4 { 6984 break 6985 } 6986 if x.Aux != s { 6987 break 6988 } 6989 if p != x.Args[0] { 6990 break 6991 } 6992 if idx != x.Args[1] { 6993 break 6994 } 6995 if w != x.Args[2] { 6996 break 6997 } 6998 mem := x.Args[3] 6999 if !(x.Uses == 1 && clobber(x)) { 7000 break 7001 } 7002 v.reset(OpAMD64MOVQstoreidx1) 7003 v.AuxInt = i - 4 7004 v.Aux = s 7005 v.AddArg(p) 7006 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 7007 v0.AuxInt = 2 7008 v0.AddArg(idx) 7009 v.AddArg(v0) 7010 v.AddArg(w) 7011 v.AddArg(mem) 7012 return true 7013 } 7014 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 7015 // cond: x.Uses == 1 && clobber(x) 7016 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 7017 for { 7018 i := v.AuxInt 7019 s := v.Aux 7020 p := v.Args[0] 7021 idx := v.Args[1] 7022 v_2 := v.Args[2] 7023 if v_2.Op != OpAMD64SHRQconst { 7024 break 7025 } 7026 j := v_2.AuxInt 7027 w := v_2.Args[0] 7028 x := v.Args[3] 7029 if x.Op != OpAMD64MOVLstoreidx4 { 7030 break 7031 } 7032 if x.AuxInt != i-4 { 7033 break 7034 } 7035 if x.Aux != s { 7036 break 7037 } 7038 if p != x.Args[0] { 7039 break 7040 } 7041 if idx != x.Args[1] { 7042 break 7043 } 7044 w0 := x.Args[2] 7045 if w0.Op != OpAMD64SHRQconst { 7046 break 7047 } 7048 if w0.AuxInt != j-32 { 7049 break 7050 } 7051 if w != w0.Args[0] { 7052 break 7053 } 7054 mem := x.Args[3] 7055 if !(x.Uses == 1 && clobber(x)) { 7056 break 7057 } 7058 v.reset(OpAMD64MOVQstoreidx1) 7059 v.AuxInt = i - 4 7060 v.Aux = s 7061 v.AddArg(p) 7062 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 7063 v0.AuxInt = 2 7064 v0.AddArg(idx) 7065 v.AddArg(v0) 7066 v.AddArg(w0) 7067 v.AddArg(mem) 7068 return true 7069 } 7070 return false 7071 } 7072 func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool { 7073 b := v.Block 7074 _ = b 7075 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 7076 // cond: is32Bit(off1+off2) 7077 // result: (MOVOload [off1+off2] {sym} ptr mem) 7078 for { 7079 off1 := v.AuxInt 7080 sym := v.Aux 7081 v_0 := v.Args[0] 7082 if v_0.Op != OpAMD64ADDQconst { 7083 break 7084 } 7085 off2 := v_0.AuxInt 7086 ptr := v_0.Args[0] 7087 mem := v.Args[1] 7088 if !(is32Bit(off1 + off2)) { 7089 break 7090 } 7091 v.reset(OpAMD64MOVOload) 7092 v.AuxInt = off1 + off2 7093 v.Aux = sym 7094 v.AddArg(ptr) 7095 v.AddArg(mem) 7096 return true 7097 } 7098 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7099 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7100 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7101 for { 7102 off1 := v.AuxInt 7103 sym1 := v.Aux 7104 v_0 := v.Args[0] 7105 if v_0.Op != OpAMD64LEAQ { 7106 break 7107 } 7108 off2 := v_0.AuxInt 7109 sym2 := v_0.Aux 7110 base := v_0.Args[0] 7111 mem := v.Args[1] 7112 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7113 break 7114 } 7115 v.reset(OpAMD64MOVOload) 7116 v.AuxInt = off1 + off2 7117 v.Aux = mergeSym(sym1, sym2) 7118 v.AddArg(base) 7119 v.AddArg(mem) 7120 return true 7121 } 7122 return false 7123 } 7124 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool { 7125 b := v.Block 7126 _ = b 7127 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7128 // cond: is32Bit(off1+off2) 7129 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 7130 for { 7131 off1 := v.AuxInt 7132 sym := v.Aux 7133 v_0 := v.Args[0] 7134 if v_0.Op != OpAMD64ADDQconst { 7135 break 7136 } 7137 off2 := v_0.AuxInt 7138 ptr := v_0.Args[0] 7139 val := v.Args[1] 7140 mem := v.Args[2] 7141 if !(is32Bit(off1 + off2)) { 7142 break 7143 } 7144 v.reset(OpAMD64MOVOstore) 7145 v.AuxInt = off1 + off2 7146 v.Aux = sym 7147 v.AddArg(ptr) 7148 v.AddArg(val) 7149 v.AddArg(mem) 7150 return true 7151 } 7152 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7153 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7154 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7155 for { 7156 off1 := v.AuxInt 7157 sym1 := v.Aux 7158 v_0 := v.Args[0] 7159 if v_0.Op != OpAMD64LEAQ { 7160 break 7161 } 7162 off2 := v_0.AuxInt 7163 sym2 := v_0.Aux 7164 base := v_0.Args[0] 7165 val := v.Args[1] 7166 mem := v.Args[2] 7167 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7168 break 7169 } 7170 v.reset(OpAMD64MOVOstore) 7171 v.AuxInt = off1 + off2 7172 v.Aux = mergeSym(sym1, sym2) 7173 v.AddArg(base) 7174 v.AddArg(val) 7175 v.AddArg(mem) 7176 return true 7177 } 7178 return false 7179 } 7180 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool { 7181 b := v.Block 7182 _ = b 7183 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 7184 // cond: is32Bit(off1+off2) 7185 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 7186 for { 7187 off1 := v.AuxInt 7188 sym := v.Aux 7189 v_0 := v.Args[0] 7190 if v_0.Op != OpAMD64ADDQconst { 7191 break 7192 } 7193 off2 := v_0.AuxInt 7194 ptr := v_0.Args[0] 7195 mem := v.Args[1] 7196 if !(is32Bit(off1 + off2)) { 7197 break 7198 } 7199 v.reset(OpAMD64MOVQatomicload) 7200 v.AuxInt = off1 + off2 7201 v.Aux = sym 7202 v.AddArg(ptr) 7203 v.AddArg(mem) 7204 return true 7205 } 7206 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 7207 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7208 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 7209 for { 7210 off1 := v.AuxInt 7211 sym1 := v.Aux 7212 v_0 := v.Args[0] 7213 if v_0.Op != OpAMD64LEAQ { 7214 break 7215 } 7216 off2 := v_0.AuxInt 7217 sym2 := v_0.Aux 7218 ptr := v_0.Args[0] 7219 mem := v.Args[1] 7220 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7221 break 7222 } 7223 v.reset(OpAMD64MOVQatomicload) 7224 v.AuxInt = off1 + off2 7225 v.Aux = mergeSym(sym1, sym2) 7226 v.AddArg(ptr) 7227 v.AddArg(mem) 7228 return true 7229 } 7230 return false 7231 } 7232 func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool { 7233 b := v.Block 7234 _ = b 7235 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 7236 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 7237 // result: x 7238 for { 7239 off := v.AuxInt 7240 sym := v.Aux 7241 ptr := v.Args[0] 7242 v_1 := v.Args[1] 7243 if v_1.Op != OpAMD64MOVQstore { 7244 break 7245 } 7246 off2 := v_1.AuxInt 7247 sym2 := v_1.Aux 7248 ptr2 := v_1.Args[0] 7249 x := v_1.Args[1] 7250 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 7251 break 7252 } 7253 v.reset(OpCopy) 7254 v.Type = x.Type 7255 v.AddArg(x) 7256 return true 7257 } 7258 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 7259 // cond: is32Bit(off1+off2) 7260 // result: (MOVQload [off1+off2] {sym} ptr mem) 7261 for { 7262 off1 := v.AuxInt 7263 sym := v.Aux 7264 v_0 := v.Args[0] 7265 if v_0.Op != OpAMD64ADDQconst { 7266 break 7267 } 7268 off2 := v_0.AuxInt 7269 ptr := v_0.Args[0] 7270 mem := v.Args[1] 7271 if !(is32Bit(off1 + off2)) { 7272 break 7273 } 7274 v.reset(OpAMD64MOVQload) 7275 v.AuxInt = off1 + off2 7276 v.Aux = sym 7277 v.AddArg(ptr) 7278 v.AddArg(mem) 7279 return true 7280 } 7281 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7282 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7283 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7284 for { 7285 off1 := v.AuxInt 7286 sym1 := v.Aux 7287 v_0 := v.Args[0] 7288 if v_0.Op != OpAMD64LEAQ { 7289 break 7290 } 7291 off2 := v_0.AuxInt 7292 sym2 := v_0.Aux 7293 base := v_0.Args[0] 7294 mem := v.Args[1] 7295 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7296 break 7297 } 7298 v.reset(OpAMD64MOVQload) 7299 v.AuxInt = off1 + off2 7300 v.Aux = mergeSym(sym1, sym2) 7301 v.AddArg(base) 7302 v.AddArg(mem) 7303 return true 7304 } 7305 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7306 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7307 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7308 for { 7309 off1 := v.AuxInt 7310 sym1 := v.Aux 7311 v_0 := v.Args[0] 7312 if v_0.Op != OpAMD64LEAQ1 { 7313 break 7314 } 7315 off2 := v_0.AuxInt 7316 sym2 := v_0.Aux 7317 ptr := v_0.Args[0] 7318 idx := v_0.Args[1] 7319 mem := v.Args[1] 7320 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7321 break 7322 } 7323 v.reset(OpAMD64MOVQloadidx1) 7324 v.AuxInt = off1 + off2 7325 v.Aux = mergeSym(sym1, sym2) 7326 v.AddArg(ptr) 7327 v.AddArg(idx) 7328 v.AddArg(mem) 7329 return true 7330 } 7331 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 7332 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7333 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7334 for { 7335 off1 := v.AuxInt 7336 sym1 := v.Aux 7337 v_0 := v.Args[0] 7338 if v_0.Op != OpAMD64LEAQ8 { 7339 break 7340 } 7341 off2 := v_0.AuxInt 7342 sym2 := v_0.Aux 7343 ptr := v_0.Args[0] 7344 idx := v_0.Args[1] 7345 mem := v.Args[1] 7346 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7347 break 7348 } 7349 v.reset(OpAMD64MOVQloadidx8) 7350 v.AuxInt = off1 + off2 7351 v.Aux = mergeSym(sym1, sym2) 7352 v.AddArg(ptr) 7353 v.AddArg(idx) 7354 v.AddArg(mem) 7355 return true 7356 } 7357 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 7358 // cond: ptr.Op != OpSB 7359 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 7360 for { 7361 off := v.AuxInt 7362 sym := v.Aux 7363 v_0 := v.Args[0] 7364 if v_0.Op != OpAMD64ADDQ { 7365 break 7366 } 7367 ptr := v_0.Args[0] 7368 idx := v_0.Args[1] 7369 mem := v.Args[1] 7370 if !(ptr.Op != OpSB) { 7371 break 7372 } 7373 v.reset(OpAMD64MOVQloadidx1) 7374 v.AuxInt = off 7375 v.Aux = sym 7376 v.AddArg(ptr) 7377 v.AddArg(idx) 7378 v.AddArg(mem) 7379 return true 7380 } 7381 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 7382 // cond: canMergeSym(sym1, sym2) 7383 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7384 for { 7385 off1 := v.AuxInt 7386 sym1 := v.Aux 7387 v_0 := v.Args[0] 7388 if v_0.Op != OpAMD64LEAL { 7389 break 7390 } 7391 off2 := v_0.AuxInt 7392 sym2 := v_0.Aux 7393 base := v_0.Args[0] 7394 mem := v.Args[1] 7395 if !(canMergeSym(sym1, sym2)) { 7396 break 7397 } 7398 v.reset(OpAMD64MOVQload) 7399 v.AuxInt = off1 + off2 7400 v.Aux = mergeSym(sym1, sym2) 7401 v.AddArg(base) 7402 v.AddArg(mem) 7403 return true 7404 } 7405 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 7406 // cond: is32Bit(off1+off2) 7407 // result: (MOVQload [off1+off2] {sym} ptr mem) 7408 for { 7409 off1 := v.AuxInt 7410 sym := v.Aux 7411 v_0 := v.Args[0] 7412 if v_0.Op != OpAMD64ADDLconst { 7413 break 7414 } 7415 off2 := v_0.AuxInt 7416 ptr := v_0.Args[0] 7417 mem := v.Args[1] 7418 if !(is32Bit(off1 + off2)) { 7419 break 7420 } 7421 v.reset(OpAMD64MOVQload) 7422 v.AuxInt = off1 + off2 7423 v.Aux = sym 7424 v.AddArg(ptr) 7425 v.AddArg(mem) 7426 return true 7427 } 7428 return false 7429 } 7430 func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool { 7431 b := v.Block 7432 _ = b 7433 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7434 // cond: 7435 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 7436 for { 7437 c := v.AuxInt 7438 sym := v.Aux 7439 ptr := v.Args[0] 7440 v_1 := v.Args[1] 7441 if v_1.Op != OpAMD64SHLQconst { 7442 break 7443 } 7444 if v_1.AuxInt != 3 { 7445 break 7446 } 7447 idx := v_1.Args[0] 7448 mem := v.Args[2] 7449 v.reset(OpAMD64MOVQloadidx8) 7450 v.AuxInt = c 7451 v.Aux = sym 7452 v.AddArg(ptr) 7453 v.AddArg(idx) 7454 v.AddArg(mem) 7455 return true 7456 } 7457 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7458 // cond: 7459 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 7460 for { 7461 c := v.AuxInt 7462 sym := v.Aux 7463 v_0 := v.Args[0] 7464 if v_0.Op != OpAMD64ADDQconst { 7465 break 7466 } 7467 d := v_0.AuxInt 7468 ptr := v_0.Args[0] 7469 idx := v.Args[1] 7470 mem := v.Args[2] 7471 v.reset(OpAMD64MOVQloadidx1) 7472 v.AuxInt = c + d 7473 v.Aux = sym 7474 v.AddArg(ptr) 7475 v.AddArg(idx) 7476 v.AddArg(mem) 7477 return true 7478 } 7479 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7480 // cond: 7481 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 7482 for { 7483 c := v.AuxInt 7484 sym := v.Aux 7485 ptr := v.Args[0] 7486 v_1 := v.Args[1] 7487 if v_1.Op != OpAMD64ADDQconst { 7488 break 7489 } 7490 d := v_1.AuxInt 7491 idx := v_1.Args[0] 7492 mem := v.Args[2] 7493 v.reset(OpAMD64MOVQloadidx1) 7494 v.AuxInt = c + d 7495 v.Aux = sym 7496 v.AddArg(ptr) 7497 v.AddArg(idx) 7498 v.AddArg(mem) 7499 return true 7500 } 7501 return false 7502 } 7503 func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool { 7504 b := v.Block 7505 _ = b 7506 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 7507 // cond: 7508 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 7509 for { 7510 c := v.AuxInt 7511 sym := v.Aux 7512 v_0 := v.Args[0] 7513 if v_0.Op != OpAMD64ADDQconst { 7514 break 7515 } 7516 d := v_0.AuxInt 7517 ptr := v_0.Args[0] 7518 idx := v.Args[1] 7519 mem := v.Args[2] 7520 v.reset(OpAMD64MOVQloadidx8) 7521 v.AuxInt = c + d 7522 v.Aux = sym 7523 v.AddArg(ptr) 7524 v.AddArg(idx) 7525 v.AddArg(mem) 7526 return true 7527 } 7528 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 7529 // cond: 7530 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 7531 for { 7532 c := v.AuxInt 7533 sym := v.Aux 7534 ptr := v.Args[0] 7535 v_1 := v.Args[1] 7536 if v_1.Op != OpAMD64ADDQconst { 7537 break 7538 } 7539 d := v_1.AuxInt 7540 idx := v_1.Args[0] 7541 mem := v.Args[2] 7542 v.reset(OpAMD64MOVQloadidx8) 7543 v.AuxInt = c + 8*d 7544 v.Aux = sym 7545 v.AddArg(ptr) 7546 v.AddArg(idx) 7547 v.AddArg(mem) 7548 return true 7549 } 7550 return false 7551 } 7552 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool { 7553 b := v.Block 7554 _ = b 7555 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 7556 // cond: is32Bit(off1+off2) 7557 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 7558 for { 7559 off1 := v.AuxInt 7560 sym := v.Aux 7561 v_0 := v.Args[0] 7562 if v_0.Op != OpAMD64ADDQconst { 7563 break 7564 } 7565 off2 := v_0.AuxInt 7566 ptr := v_0.Args[0] 7567 val := v.Args[1] 7568 mem := v.Args[2] 7569 if !(is32Bit(off1 + off2)) { 7570 break 7571 } 7572 v.reset(OpAMD64MOVQstore) 7573 v.AuxInt = off1 + off2 7574 v.Aux = sym 7575 v.AddArg(ptr) 7576 v.AddArg(val) 7577 v.AddArg(mem) 7578 return true 7579 } 7580 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 7581 // cond: validValAndOff(c,off) 7582 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 7583 for { 7584 off := v.AuxInt 7585 sym := v.Aux 7586 ptr := v.Args[0] 7587 v_1 := v.Args[1] 7588 if v_1.Op != OpAMD64MOVQconst { 7589 break 7590 } 7591 c := v_1.AuxInt 7592 mem := v.Args[2] 7593 if !(validValAndOff(c, off)) { 7594 break 7595 } 7596 v.reset(OpAMD64MOVQstoreconst) 7597 v.AuxInt = makeValAndOff(c, off) 7598 v.Aux = sym 7599 v.AddArg(ptr) 7600 v.AddArg(mem) 7601 return true 7602 } 7603 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7604 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7605 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7606 for { 7607 off1 := v.AuxInt 7608 sym1 := v.Aux 7609 v_0 := v.Args[0] 7610 if v_0.Op != OpAMD64LEAQ { 7611 break 7612 } 7613 off2 := v_0.AuxInt 7614 sym2 := v_0.Aux 7615 base := v_0.Args[0] 7616 val := v.Args[1] 7617 mem := v.Args[2] 7618 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7619 break 7620 } 7621 v.reset(OpAMD64MOVQstore) 7622 v.AuxInt = off1 + off2 7623 v.Aux = mergeSym(sym1, sym2) 7624 v.AddArg(base) 7625 v.AddArg(val) 7626 v.AddArg(mem) 7627 return true 7628 } 7629 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 7630 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7631 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7632 for { 7633 off1 := v.AuxInt 7634 sym1 := v.Aux 7635 v_0 := v.Args[0] 7636 if v_0.Op != OpAMD64LEAQ1 { 7637 break 7638 } 7639 off2 := v_0.AuxInt 7640 sym2 := v_0.Aux 7641 ptr := v_0.Args[0] 7642 idx := v_0.Args[1] 7643 val := v.Args[1] 7644 mem := v.Args[2] 7645 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7646 break 7647 } 7648 v.reset(OpAMD64MOVQstoreidx1) 7649 v.AuxInt = off1 + off2 7650 v.Aux = mergeSym(sym1, sym2) 7651 v.AddArg(ptr) 7652 v.AddArg(idx) 7653 v.AddArg(val) 7654 v.AddArg(mem) 7655 return true 7656 } 7657 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 7658 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7659 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 7660 for { 7661 off1 := v.AuxInt 7662 sym1 := v.Aux 7663 v_0 := v.Args[0] 7664 if v_0.Op != OpAMD64LEAQ8 { 7665 break 7666 } 7667 off2 := v_0.AuxInt 7668 sym2 := v_0.Aux 7669 ptr := v_0.Args[0] 7670 idx := v_0.Args[1] 7671 val := v.Args[1] 7672 mem := v.Args[2] 7673 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7674 break 7675 } 7676 v.reset(OpAMD64MOVQstoreidx8) 7677 v.AuxInt = off1 + off2 7678 v.Aux = mergeSym(sym1, sym2) 7679 v.AddArg(ptr) 7680 v.AddArg(idx) 7681 v.AddArg(val) 7682 v.AddArg(mem) 7683 return true 7684 } 7685 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 7686 // cond: ptr.Op != OpSB 7687 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 7688 for { 7689 off := v.AuxInt 7690 sym := v.Aux 7691 v_0 := v.Args[0] 7692 if v_0.Op != OpAMD64ADDQ { 7693 break 7694 } 7695 ptr := v_0.Args[0] 7696 idx := v_0.Args[1] 7697 val := v.Args[1] 7698 mem := v.Args[2] 7699 if !(ptr.Op != OpSB) { 7700 break 7701 } 7702 v.reset(OpAMD64MOVQstoreidx1) 7703 v.AuxInt = off 7704 v.Aux = sym 7705 v.AddArg(ptr) 7706 v.AddArg(idx) 7707 v.AddArg(val) 7708 v.AddArg(mem) 7709 return true 7710 } 7711 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 7712 // cond: canMergeSym(sym1, sym2) 7713 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7714 for { 7715 off1 := v.AuxInt 7716 sym1 := v.Aux 7717 v_0 := v.Args[0] 7718 if v_0.Op != OpAMD64LEAL { 7719 break 7720 } 7721 off2 := v_0.AuxInt 7722 sym2 := v_0.Aux 7723 base := v_0.Args[0] 7724 val := v.Args[1] 7725 mem := v.Args[2] 7726 if !(canMergeSym(sym1, sym2)) { 7727 break 7728 } 7729 v.reset(OpAMD64MOVQstore) 7730 v.AuxInt = off1 + off2 7731 v.Aux = mergeSym(sym1, sym2) 7732 v.AddArg(base) 7733 v.AddArg(val) 7734 v.AddArg(mem) 7735 return true 7736 } 7737 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 7738 // cond: is32Bit(off1+off2) 7739 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 7740 for { 7741 off1 := v.AuxInt 7742 sym := v.Aux 7743 v_0 := v.Args[0] 7744 if v_0.Op != OpAMD64ADDLconst { 7745 break 7746 } 7747 off2 := v_0.AuxInt 7748 ptr := v_0.Args[0] 7749 val := v.Args[1] 7750 mem := v.Args[2] 7751 if !(is32Bit(off1 + off2)) { 7752 break 7753 } 7754 v.reset(OpAMD64MOVQstore) 7755 v.AuxInt = off1 + off2 7756 v.Aux = sym 7757 v.AddArg(ptr) 7758 v.AddArg(val) 7759 v.AddArg(mem) 7760 return true 7761 } 7762 return false 7763 } 7764 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool { 7765 b := v.Block 7766 _ = b 7767 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 7768 // cond: ValAndOff(sc).canAdd(off) 7769 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7770 for { 7771 sc := v.AuxInt 7772 s := v.Aux 7773 v_0 := v.Args[0] 7774 if v_0.Op != OpAMD64ADDQconst { 7775 break 7776 } 7777 off := v_0.AuxInt 7778 ptr := v_0.Args[0] 7779 mem := v.Args[1] 7780 if !(ValAndOff(sc).canAdd(off)) { 7781 break 7782 } 7783 v.reset(OpAMD64MOVQstoreconst) 7784 v.AuxInt = ValAndOff(sc).add(off) 7785 v.Aux = s 7786 v.AddArg(ptr) 7787 v.AddArg(mem) 7788 return true 7789 } 7790 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 7791 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7792 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7793 for { 7794 sc := v.AuxInt 7795 sym1 := v.Aux 7796 v_0 := v.Args[0] 7797 if v_0.Op != OpAMD64LEAQ { 7798 break 7799 } 7800 off := v_0.AuxInt 7801 sym2 := v_0.Aux 7802 ptr := v_0.Args[0] 7803 mem := v.Args[1] 7804 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7805 break 7806 } 7807 v.reset(OpAMD64MOVQstoreconst) 7808 v.AuxInt = ValAndOff(sc).add(off) 7809 v.Aux = mergeSym(sym1, sym2) 7810 v.AddArg(ptr) 7811 v.AddArg(mem) 7812 return true 7813 } 7814 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 7815 // cond: canMergeSym(sym1, sym2) 7816 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7817 for { 7818 x := v.AuxInt 7819 sym1 := v.Aux 7820 v_0 := v.Args[0] 7821 if v_0.Op != OpAMD64LEAQ1 { 7822 break 7823 } 7824 off := v_0.AuxInt 7825 sym2 := v_0.Aux 7826 ptr := v_0.Args[0] 7827 idx := v_0.Args[1] 7828 mem := v.Args[1] 7829 if !(canMergeSym(sym1, sym2)) { 7830 break 7831 } 7832 v.reset(OpAMD64MOVQstoreconstidx1) 7833 v.AuxInt = ValAndOff(x).add(off) 7834 v.Aux = mergeSym(sym1, sym2) 7835 v.AddArg(ptr) 7836 v.AddArg(idx) 7837 v.AddArg(mem) 7838 return true 7839 } 7840 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 7841 // cond: canMergeSym(sym1, sym2) 7842 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 7843 for { 7844 x := v.AuxInt 7845 sym1 := v.Aux 7846 v_0 := v.Args[0] 7847 if v_0.Op != OpAMD64LEAQ8 { 7848 break 7849 } 7850 off := v_0.AuxInt 7851 sym2 := v_0.Aux 7852 ptr := v_0.Args[0] 7853 idx := v_0.Args[1] 7854 mem := v.Args[1] 7855 if !(canMergeSym(sym1, sym2)) { 7856 break 7857 } 7858 v.reset(OpAMD64MOVQstoreconstidx8) 7859 v.AuxInt = ValAndOff(x).add(off) 7860 v.Aux = mergeSym(sym1, sym2) 7861 v.AddArg(ptr) 7862 v.AddArg(idx) 7863 v.AddArg(mem) 7864 return true 7865 } 7866 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 7867 // cond: 7868 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 7869 for { 7870 x := v.AuxInt 7871 sym := v.Aux 7872 v_0 := v.Args[0] 7873 if v_0.Op != OpAMD64ADDQ { 7874 break 7875 } 7876 ptr := v_0.Args[0] 7877 idx := v_0.Args[1] 7878 mem := v.Args[1] 7879 v.reset(OpAMD64MOVQstoreconstidx1) 7880 v.AuxInt = x 7881 v.Aux = sym 7882 v.AddArg(ptr) 7883 v.AddArg(idx) 7884 v.AddArg(mem) 7885 return true 7886 } 7887 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 7888 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 7889 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 7890 for { 7891 sc := v.AuxInt 7892 sym1 := v.Aux 7893 v_0 := v.Args[0] 7894 if v_0.Op != OpAMD64LEAL { 7895 break 7896 } 7897 off := v_0.AuxInt 7898 sym2 := v_0.Aux 7899 ptr := v_0.Args[0] 7900 mem := v.Args[1] 7901 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 7902 break 7903 } 7904 v.reset(OpAMD64MOVQstoreconst) 7905 v.AuxInt = ValAndOff(sc).add(off) 7906 v.Aux = mergeSym(sym1, sym2) 7907 v.AddArg(ptr) 7908 v.AddArg(mem) 7909 return true 7910 } 7911 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 7912 // cond: ValAndOff(sc).canAdd(off) 7913 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 7914 for { 7915 sc := v.AuxInt 7916 s := v.Aux 7917 v_0 := v.Args[0] 7918 if v_0.Op != OpAMD64ADDLconst { 7919 break 7920 } 7921 off := v_0.AuxInt 7922 ptr := v_0.Args[0] 7923 mem := v.Args[1] 7924 if !(ValAndOff(sc).canAdd(off)) { 7925 break 7926 } 7927 v.reset(OpAMD64MOVQstoreconst) 7928 v.AuxInt = ValAndOff(sc).add(off) 7929 v.Aux = s 7930 v.AddArg(ptr) 7931 v.AddArg(mem) 7932 return true 7933 } 7934 return false 7935 } 7936 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool { 7937 b := v.Block 7938 _ = b 7939 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7940 // cond: 7941 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 7942 for { 7943 c := v.AuxInt 7944 sym := v.Aux 7945 ptr := v.Args[0] 7946 v_1 := v.Args[1] 7947 if v_1.Op != OpAMD64SHLQconst { 7948 break 7949 } 7950 if v_1.AuxInt != 3 { 7951 break 7952 } 7953 idx := v_1.Args[0] 7954 mem := v.Args[2] 7955 v.reset(OpAMD64MOVQstoreconstidx8) 7956 v.AuxInt = c 7957 v.Aux = sym 7958 v.AddArg(ptr) 7959 v.AddArg(idx) 7960 v.AddArg(mem) 7961 return true 7962 } 7963 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 7964 // cond: 7965 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7966 for { 7967 x := v.AuxInt 7968 sym := v.Aux 7969 v_0 := v.Args[0] 7970 if v_0.Op != OpAMD64ADDQconst { 7971 break 7972 } 7973 c := v_0.AuxInt 7974 ptr := v_0.Args[0] 7975 idx := v.Args[1] 7976 mem := v.Args[2] 7977 v.reset(OpAMD64MOVQstoreconstidx1) 7978 v.AuxInt = ValAndOff(x).add(c) 7979 v.Aux = sym 7980 v.AddArg(ptr) 7981 v.AddArg(idx) 7982 v.AddArg(mem) 7983 return true 7984 } 7985 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 7986 // cond: 7987 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 7988 for { 7989 x := v.AuxInt 7990 sym := v.Aux 7991 ptr := v.Args[0] 7992 v_1 := v.Args[1] 7993 if v_1.Op != OpAMD64ADDQconst { 7994 break 7995 } 7996 c := v_1.AuxInt 7997 idx := v_1.Args[0] 7998 mem := v.Args[2] 7999 v.reset(OpAMD64MOVQstoreconstidx1) 8000 v.AuxInt = ValAndOff(x).add(c) 8001 v.Aux = sym 8002 v.AddArg(ptr) 8003 v.AddArg(idx) 8004 v.AddArg(mem) 8005 return true 8006 } 8007 return false 8008 } 8009 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool { 8010 b := v.Block 8011 _ = b 8012 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 8013 // cond: 8014 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8015 for { 8016 x := v.AuxInt 8017 sym := v.Aux 8018 v_0 := v.Args[0] 8019 if v_0.Op != OpAMD64ADDQconst { 8020 break 8021 } 8022 c := v_0.AuxInt 8023 ptr := v_0.Args[0] 8024 idx := v.Args[1] 8025 mem := v.Args[2] 8026 v.reset(OpAMD64MOVQstoreconstidx8) 8027 v.AuxInt = ValAndOff(x).add(c) 8028 v.Aux = sym 8029 v.AddArg(ptr) 8030 v.AddArg(idx) 8031 v.AddArg(mem) 8032 return true 8033 } 8034 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 8035 // cond: 8036 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 8037 for { 8038 x := v.AuxInt 8039 sym := v.Aux 8040 ptr := v.Args[0] 8041 v_1 := v.Args[1] 8042 if v_1.Op != OpAMD64ADDQconst { 8043 break 8044 } 8045 c := v_1.AuxInt 8046 idx := v_1.Args[0] 8047 mem := v.Args[2] 8048 v.reset(OpAMD64MOVQstoreconstidx8) 8049 v.AuxInt = ValAndOff(x).add(8 * c) 8050 v.Aux = sym 8051 v.AddArg(ptr) 8052 v.AddArg(idx) 8053 v.AddArg(mem) 8054 return true 8055 } 8056 return false 8057 } 8058 func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool { 8059 b := v.Block 8060 _ = b 8061 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 8062 // cond: 8063 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 8064 for { 8065 c := v.AuxInt 8066 sym := v.Aux 8067 ptr := v.Args[0] 8068 v_1 := v.Args[1] 8069 if v_1.Op != OpAMD64SHLQconst { 8070 break 8071 } 8072 if v_1.AuxInt != 3 { 8073 break 8074 } 8075 idx := v_1.Args[0] 8076 val := v.Args[2] 8077 mem := v.Args[3] 8078 v.reset(OpAMD64MOVQstoreidx8) 8079 v.AuxInt = c 8080 v.Aux = sym 8081 v.AddArg(ptr) 8082 v.AddArg(idx) 8083 v.AddArg(val) 8084 v.AddArg(mem) 8085 return true 8086 } 8087 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8088 // cond: 8089 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 8090 for { 8091 c := v.AuxInt 8092 sym := v.Aux 8093 v_0 := v.Args[0] 8094 if v_0.Op != OpAMD64ADDQconst { 8095 break 8096 } 8097 d := v_0.AuxInt 8098 ptr := v_0.Args[0] 8099 idx := v.Args[1] 8100 val := v.Args[2] 8101 mem := v.Args[3] 8102 v.reset(OpAMD64MOVQstoreidx1) 8103 v.AuxInt = c + d 8104 v.Aux = sym 8105 v.AddArg(ptr) 8106 v.AddArg(idx) 8107 v.AddArg(val) 8108 v.AddArg(mem) 8109 return true 8110 } 8111 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8112 // cond: 8113 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 8114 for { 8115 c := v.AuxInt 8116 sym := v.Aux 8117 ptr := v.Args[0] 8118 v_1 := v.Args[1] 8119 if v_1.Op != OpAMD64ADDQconst { 8120 break 8121 } 8122 d := v_1.AuxInt 8123 idx := v_1.Args[0] 8124 val := v.Args[2] 8125 mem := v.Args[3] 8126 v.reset(OpAMD64MOVQstoreidx1) 8127 v.AuxInt = c + d 8128 v.Aux = sym 8129 v.AddArg(ptr) 8130 v.AddArg(idx) 8131 v.AddArg(val) 8132 v.AddArg(mem) 8133 return true 8134 } 8135 return false 8136 } 8137 func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool { 8138 b := v.Block 8139 _ = b 8140 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8141 // cond: 8142 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 8143 for { 8144 c := v.AuxInt 8145 sym := v.Aux 8146 v_0 := v.Args[0] 8147 if v_0.Op != OpAMD64ADDQconst { 8148 break 8149 } 8150 d := v_0.AuxInt 8151 ptr := v_0.Args[0] 8152 idx := v.Args[1] 8153 val := v.Args[2] 8154 mem := v.Args[3] 8155 v.reset(OpAMD64MOVQstoreidx8) 8156 v.AuxInt = c + d 8157 v.Aux = sym 8158 v.AddArg(ptr) 8159 v.AddArg(idx) 8160 v.AddArg(val) 8161 v.AddArg(mem) 8162 return true 8163 } 8164 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8165 // cond: 8166 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 8167 for { 8168 c := v.AuxInt 8169 sym := v.Aux 8170 ptr := v.Args[0] 8171 v_1 := v.Args[1] 8172 if v_1.Op != OpAMD64ADDQconst { 8173 break 8174 } 8175 d := v_1.AuxInt 8176 idx := v_1.Args[0] 8177 val := v.Args[2] 8178 mem := v.Args[3] 8179 v.reset(OpAMD64MOVQstoreidx8) 8180 v.AuxInt = c + 8*d 8181 v.Aux = sym 8182 v.AddArg(ptr) 8183 v.AddArg(idx) 8184 v.AddArg(val) 8185 v.AddArg(mem) 8186 return true 8187 } 8188 return false 8189 } 8190 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool { 8191 b := v.Block 8192 _ = b 8193 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 8194 // cond: is32Bit(off1+off2) 8195 // result: (MOVSDload [off1+off2] {sym} ptr mem) 8196 for { 8197 off1 := v.AuxInt 8198 sym := v.Aux 8199 v_0 := v.Args[0] 8200 if v_0.Op != OpAMD64ADDQconst { 8201 break 8202 } 8203 off2 := v_0.AuxInt 8204 ptr := v_0.Args[0] 8205 mem := v.Args[1] 8206 if !(is32Bit(off1 + off2)) { 8207 break 8208 } 8209 v.reset(OpAMD64MOVSDload) 8210 v.AuxInt = off1 + off2 8211 v.Aux = sym 8212 v.AddArg(ptr) 8213 v.AddArg(mem) 8214 return true 8215 } 8216 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8217 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8218 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8219 for { 8220 off1 := v.AuxInt 8221 sym1 := v.Aux 8222 v_0 := v.Args[0] 8223 if v_0.Op != OpAMD64LEAQ { 8224 break 8225 } 8226 off2 := v_0.AuxInt 8227 sym2 := v_0.Aux 8228 base := v_0.Args[0] 8229 mem := v.Args[1] 8230 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8231 break 8232 } 8233 v.reset(OpAMD64MOVSDload) 8234 v.AuxInt = off1 + off2 8235 v.Aux = mergeSym(sym1, sym2) 8236 v.AddArg(base) 8237 v.AddArg(mem) 8238 return true 8239 } 8240 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8241 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8242 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8243 for { 8244 off1 := v.AuxInt 8245 sym1 := v.Aux 8246 v_0 := v.Args[0] 8247 if v_0.Op != OpAMD64LEAQ1 { 8248 break 8249 } 8250 off2 := v_0.AuxInt 8251 sym2 := v_0.Aux 8252 ptr := v_0.Args[0] 8253 idx := v_0.Args[1] 8254 mem := v.Args[1] 8255 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8256 break 8257 } 8258 v.reset(OpAMD64MOVSDloadidx1) 8259 v.AuxInt = off1 + off2 8260 v.Aux = mergeSym(sym1, sym2) 8261 v.AddArg(ptr) 8262 v.AddArg(idx) 8263 v.AddArg(mem) 8264 return true 8265 } 8266 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 8267 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8268 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8269 for { 8270 off1 := v.AuxInt 8271 sym1 := v.Aux 8272 v_0 := v.Args[0] 8273 if v_0.Op != OpAMD64LEAQ8 { 8274 break 8275 } 8276 off2 := v_0.AuxInt 8277 sym2 := v_0.Aux 8278 ptr := v_0.Args[0] 8279 idx := v_0.Args[1] 8280 mem := v.Args[1] 8281 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8282 break 8283 } 8284 v.reset(OpAMD64MOVSDloadidx8) 8285 v.AuxInt = off1 + off2 8286 v.Aux = mergeSym(sym1, sym2) 8287 v.AddArg(ptr) 8288 v.AddArg(idx) 8289 v.AddArg(mem) 8290 return true 8291 } 8292 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 8293 // cond: ptr.Op != OpSB 8294 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 8295 for { 8296 off := v.AuxInt 8297 sym := v.Aux 8298 v_0 := v.Args[0] 8299 if v_0.Op != OpAMD64ADDQ { 8300 break 8301 } 8302 ptr := v_0.Args[0] 8303 idx := v_0.Args[1] 8304 mem := v.Args[1] 8305 if !(ptr.Op != OpSB) { 8306 break 8307 } 8308 v.reset(OpAMD64MOVSDloadidx1) 8309 v.AuxInt = off 8310 v.Aux = sym 8311 v.AddArg(ptr) 8312 v.AddArg(idx) 8313 v.AddArg(mem) 8314 return true 8315 } 8316 return false 8317 } 8318 func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool { 8319 b := v.Block 8320 _ = b 8321 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 8322 // cond: 8323 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 8324 for { 8325 c := v.AuxInt 8326 sym := v.Aux 8327 ptr := v.Args[0] 8328 v_1 := v.Args[1] 8329 if v_1.Op != OpAMD64SHLQconst { 8330 break 8331 } 8332 if v_1.AuxInt != 3 { 8333 break 8334 } 8335 idx := v_1.Args[0] 8336 mem := v.Args[2] 8337 v.reset(OpAMD64MOVSDloadidx8) 8338 v.AuxInt = c 8339 v.Aux = sym 8340 v.AddArg(ptr) 8341 v.AddArg(idx) 8342 v.AddArg(mem) 8343 return true 8344 } 8345 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8346 // cond: 8347 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 8348 for { 8349 c := v.AuxInt 8350 sym := v.Aux 8351 v_0 := v.Args[0] 8352 if v_0.Op != OpAMD64ADDQconst { 8353 break 8354 } 8355 d := v_0.AuxInt 8356 ptr := v_0.Args[0] 8357 idx := v.Args[1] 8358 mem := v.Args[2] 8359 v.reset(OpAMD64MOVSDloadidx1) 8360 v.AuxInt = c + d 8361 v.Aux = sym 8362 v.AddArg(ptr) 8363 v.AddArg(idx) 8364 v.AddArg(mem) 8365 return true 8366 } 8367 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8368 // cond: 8369 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 8370 for { 8371 c := v.AuxInt 8372 sym := v.Aux 8373 ptr := v.Args[0] 8374 v_1 := v.Args[1] 8375 if v_1.Op != OpAMD64ADDQconst { 8376 break 8377 } 8378 d := v_1.AuxInt 8379 idx := v_1.Args[0] 8380 mem := v.Args[2] 8381 v.reset(OpAMD64MOVSDloadidx1) 8382 v.AuxInt = c + d 8383 v.Aux = sym 8384 v.AddArg(ptr) 8385 v.AddArg(idx) 8386 v.AddArg(mem) 8387 return true 8388 } 8389 return false 8390 } 8391 func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool { 8392 b := v.Block 8393 _ = b 8394 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 8395 // cond: 8396 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 8397 for { 8398 c := v.AuxInt 8399 sym := v.Aux 8400 v_0 := v.Args[0] 8401 if v_0.Op != OpAMD64ADDQconst { 8402 break 8403 } 8404 d := v_0.AuxInt 8405 ptr := v_0.Args[0] 8406 idx := v.Args[1] 8407 mem := v.Args[2] 8408 v.reset(OpAMD64MOVSDloadidx8) 8409 v.AuxInt = c + d 8410 v.Aux = sym 8411 v.AddArg(ptr) 8412 v.AddArg(idx) 8413 v.AddArg(mem) 8414 return true 8415 } 8416 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 8417 // cond: 8418 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 8419 for { 8420 c := v.AuxInt 8421 sym := v.Aux 8422 ptr := v.Args[0] 8423 v_1 := v.Args[1] 8424 if v_1.Op != OpAMD64ADDQconst { 8425 break 8426 } 8427 d := v_1.AuxInt 8428 idx := v_1.Args[0] 8429 mem := v.Args[2] 8430 v.reset(OpAMD64MOVSDloadidx8) 8431 v.AuxInt = c + 8*d 8432 v.Aux = sym 8433 v.AddArg(ptr) 8434 v.AddArg(idx) 8435 v.AddArg(mem) 8436 return true 8437 } 8438 return false 8439 } 8440 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool { 8441 b := v.Block 8442 _ = b 8443 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8444 // cond: is32Bit(off1+off2) 8445 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 8446 for { 8447 off1 := v.AuxInt 8448 sym := v.Aux 8449 v_0 := v.Args[0] 8450 if v_0.Op != OpAMD64ADDQconst { 8451 break 8452 } 8453 off2 := v_0.AuxInt 8454 ptr := v_0.Args[0] 8455 val := v.Args[1] 8456 mem := v.Args[2] 8457 if !(is32Bit(off1 + off2)) { 8458 break 8459 } 8460 v.reset(OpAMD64MOVSDstore) 8461 v.AuxInt = off1 + off2 8462 v.Aux = sym 8463 v.AddArg(ptr) 8464 v.AddArg(val) 8465 v.AddArg(mem) 8466 return true 8467 } 8468 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8469 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8470 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8471 for { 8472 off1 := v.AuxInt 8473 sym1 := v.Aux 8474 v_0 := v.Args[0] 8475 if v_0.Op != OpAMD64LEAQ { 8476 break 8477 } 8478 off2 := v_0.AuxInt 8479 sym2 := v_0.Aux 8480 base := v_0.Args[0] 8481 val := v.Args[1] 8482 mem := v.Args[2] 8483 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8484 break 8485 } 8486 v.reset(OpAMD64MOVSDstore) 8487 v.AuxInt = off1 + off2 8488 v.Aux = mergeSym(sym1, sym2) 8489 v.AddArg(base) 8490 v.AddArg(val) 8491 v.AddArg(mem) 8492 return true 8493 } 8494 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8495 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8496 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8497 for { 8498 off1 := v.AuxInt 8499 sym1 := v.Aux 8500 v_0 := v.Args[0] 8501 if v_0.Op != OpAMD64LEAQ1 { 8502 break 8503 } 8504 off2 := v_0.AuxInt 8505 sym2 := v_0.Aux 8506 ptr := v_0.Args[0] 8507 idx := v_0.Args[1] 8508 val := v.Args[1] 8509 mem := v.Args[2] 8510 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8511 break 8512 } 8513 v.reset(OpAMD64MOVSDstoreidx1) 8514 v.AuxInt = off1 + off2 8515 v.Aux = mergeSym(sym1, sym2) 8516 v.AddArg(ptr) 8517 v.AddArg(idx) 8518 v.AddArg(val) 8519 v.AddArg(mem) 8520 return true 8521 } 8522 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 8523 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8524 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8525 for { 8526 off1 := v.AuxInt 8527 sym1 := v.Aux 8528 v_0 := v.Args[0] 8529 if v_0.Op != OpAMD64LEAQ8 { 8530 break 8531 } 8532 off2 := v_0.AuxInt 8533 sym2 := v_0.Aux 8534 ptr := v_0.Args[0] 8535 idx := v_0.Args[1] 8536 val := v.Args[1] 8537 mem := v.Args[2] 8538 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8539 break 8540 } 8541 v.reset(OpAMD64MOVSDstoreidx8) 8542 v.AuxInt = off1 + off2 8543 v.Aux = mergeSym(sym1, sym2) 8544 v.AddArg(ptr) 8545 v.AddArg(idx) 8546 v.AddArg(val) 8547 v.AddArg(mem) 8548 return true 8549 } 8550 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 8551 // cond: ptr.Op != OpSB 8552 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 8553 for { 8554 off := v.AuxInt 8555 sym := v.Aux 8556 v_0 := v.Args[0] 8557 if v_0.Op != OpAMD64ADDQ { 8558 break 8559 } 8560 ptr := v_0.Args[0] 8561 idx := v_0.Args[1] 8562 val := v.Args[1] 8563 mem := v.Args[2] 8564 if !(ptr.Op != OpSB) { 8565 break 8566 } 8567 v.reset(OpAMD64MOVSDstoreidx1) 8568 v.AuxInt = off 8569 v.Aux = sym 8570 v.AddArg(ptr) 8571 v.AddArg(idx) 8572 v.AddArg(val) 8573 v.AddArg(mem) 8574 return true 8575 } 8576 return false 8577 } 8578 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool { 8579 b := v.Block 8580 _ = b 8581 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 8582 // cond: 8583 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 8584 for { 8585 c := v.AuxInt 8586 sym := v.Aux 8587 ptr := v.Args[0] 8588 v_1 := v.Args[1] 8589 if v_1.Op != OpAMD64SHLQconst { 8590 break 8591 } 8592 if v_1.AuxInt != 3 { 8593 break 8594 } 8595 idx := v_1.Args[0] 8596 val := v.Args[2] 8597 mem := v.Args[3] 8598 v.reset(OpAMD64MOVSDstoreidx8) 8599 v.AuxInt = c 8600 v.Aux = sym 8601 v.AddArg(ptr) 8602 v.AddArg(idx) 8603 v.AddArg(val) 8604 v.AddArg(mem) 8605 return true 8606 } 8607 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8608 // cond: 8609 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 8610 for { 8611 c := v.AuxInt 8612 sym := v.Aux 8613 v_0 := v.Args[0] 8614 if v_0.Op != OpAMD64ADDQconst { 8615 break 8616 } 8617 d := v_0.AuxInt 8618 ptr := v_0.Args[0] 8619 idx := v.Args[1] 8620 val := v.Args[2] 8621 mem := v.Args[3] 8622 v.reset(OpAMD64MOVSDstoreidx1) 8623 v.AuxInt = c + d 8624 v.Aux = sym 8625 v.AddArg(ptr) 8626 v.AddArg(idx) 8627 v.AddArg(val) 8628 v.AddArg(mem) 8629 return true 8630 } 8631 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8632 // cond: 8633 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 8634 for { 8635 c := v.AuxInt 8636 sym := v.Aux 8637 ptr := v.Args[0] 8638 v_1 := v.Args[1] 8639 if v_1.Op != OpAMD64ADDQconst { 8640 break 8641 } 8642 d := v_1.AuxInt 8643 idx := v_1.Args[0] 8644 val := v.Args[2] 8645 mem := v.Args[3] 8646 v.reset(OpAMD64MOVSDstoreidx1) 8647 v.AuxInt = c + d 8648 v.Aux = sym 8649 v.AddArg(ptr) 8650 v.AddArg(idx) 8651 v.AddArg(val) 8652 v.AddArg(mem) 8653 return true 8654 } 8655 return false 8656 } 8657 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool { 8658 b := v.Block 8659 _ = b 8660 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8661 // cond: 8662 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 8663 for { 8664 c := v.AuxInt 8665 sym := v.Aux 8666 v_0 := v.Args[0] 8667 if v_0.Op != OpAMD64ADDQconst { 8668 break 8669 } 8670 d := v_0.AuxInt 8671 ptr := v_0.Args[0] 8672 idx := v.Args[1] 8673 val := v.Args[2] 8674 mem := v.Args[3] 8675 v.reset(OpAMD64MOVSDstoreidx8) 8676 v.AuxInt = c + d 8677 v.Aux = sym 8678 v.AddArg(ptr) 8679 v.AddArg(idx) 8680 v.AddArg(val) 8681 v.AddArg(mem) 8682 return true 8683 } 8684 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8685 // cond: 8686 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 8687 for { 8688 c := v.AuxInt 8689 sym := v.Aux 8690 ptr := v.Args[0] 8691 v_1 := v.Args[1] 8692 if v_1.Op != OpAMD64ADDQconst { 8693 break 8694 } 8695 d := v_1.AuxInt 8696 idx := v_1.Args[0] 8697 val := v.Args[2] 8698 mem := v.Args[3] 8699 v.reset(OpAMD64MOVSDstoreidx8) 8700 v.AuxInt = c + 8*d 8701 v.Aux = sym 8702 v.AddArg(ptr) 8703 v.AddArg(idx) 8704 v.AddArg(val) 8705 v.AddArg(mem) 8706 return true 8707 } 8708 return false 8709 } 8710 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool { 8711 b := v.Block 8712 _ = b 8713 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 8714 // cond: is32Bit(off1+off2) 8715 // result: (MOVSSload [off1+off2] {sym} ptr mem) 8716 for { 8717 off1 := v.AuxInt 8718 sym := v.Aux 8719 v_0 := v.Args[0] 8720 if v_0.Op != OpAMD64ADDQconst { 8721 break 8722 } 8723 off2 := v_0.AuxInt 8724 ptr := v_0.Args[0] 8725 mem := v.Args[1] 8726 if !(is32Bit(off1 + off2)) { 8727 break 8728 } 8729 v.reset(OpAMD64MOVSSload) 8730 v.AuxInt = off1 + off2 8731 v.Aux = sym 8732 v.AddArg(ptr) 8733 v.AddArg(mem) 8734 return true 8735 } 8736 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8737 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8738 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8739 for { 8740 off1 := v.AuxInt 8741 sym1 := v.Aux 8742 v_0 := v.Args[0] 8743 if v_0.Op != OpAMD64LEAQ { 8744 break 8745 } 8746 off2 := v_0.AuxInt 8747 sym2 := v_0.Aux 8748 base := v_0.Args[0] 8749 mem := v.Args[1] 8750 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8751 break 8752 } 8753 v.reset(OpAMD64MOVSSload) 8754 v.AuxInt = off1 + off2 8755 v.Aux = mergeSym(sym1, sym2) 8756 v.AddArg(base) 8757 v.AddArg(mem) 8758 return true 8759 } 8760 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8761 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8762 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8763 for { 8764 off1 := v.AuxInt 8765 sym1 := v.Aux 8766 v_0 := v.Args[0] 8767 if v_0.Op != OpAMD64LEAQ1 { 8768 break 8769 } 8770 off2 := v_0.AuxInt 8771 sym2 := v_0.Aux 8772 ptr := v_0.Args[0] 8773 idx := v_0.Args[1] 8774 mem := v.Args[1] 8775 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8776 break 8777 } 8778 v.reset(OpAMD64MOVSSloadidx1) 8779 v.AuxInt = off1 + off2 8780 v.Aux = mergeSym(sym1, sym2) 8781 v.AddArg(ptr) 8782 v.AddArg(idx) 8783 v.AddArg(mem) 8784 return true 8785 } 8786 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 8787 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8788 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8789 for { 8790 off1 := v.AuxInt 8791 sym1 := v.Aux 8792 v_0 := v.Args[0] 8793 if v_0.Op != OpAMD64LEAQ4 { 8794 break 8795 } 8796 off2 := v_0.AuxInt 8797 sym2 := v_0.Aux 8798 ptr := v_0.Args[0] 8799 idx := v_0.Args[1] 8800 mem := v.Args[1] 8801 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8802 break 8803 } 8804 v.reset(OpAMD64MOVSSloadidx4) 8805 v.AuxInt = off1 + off2 8806 v.Aux = mergeSym(sym1, sym2) 8807 v.AddArg(ptr) 8808 v.AddArg(idx) 8809 v.AddArg(mem) 8810 return true 8811 } 8812 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 8813 // cond: ptr.Op != OpSB 8814 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 8815 for { 8816 off := v.AuxInt 8817 sym := v.Aux 8818 v_0 := v.Args[0] 8819 if v_0.Op != OpAMD64ADDQ { 8820 break 8821 } 8822 ptr := v_0.Args[0] 8823 idx := v_0.Args[1] 8824 mem := v.Args[1] 8825 if !(ptr.Op != OpSB) { 8826 break 8827 } 8828 v.reset(OpAMD64MOVSSloadidx1) 8829 v.AuxInt = off 8830 v.Aux = sym 8831 v.AddArg(ptr) 8832 v.AddArg(idx) 8833 v.AddArg(mem) 8834 return true 8835 } 8836 return false 8837 } 8838 func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool { 8839 b := v.Block 8840 _ = b 8841 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 8842 // cond: 8843 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 8844 for { 8845 c := v.AuxInt 8846 sym := v.Aux 8847 ptr := v.Args[0] 8848 v_1 := v.Args[1] 8849 if v_1.Op != OpAMD64SHLQconst { 8850 break 8851 } 8852 if v_1.AuxInt != 2 { 8853 break 8854 } 8855 idx := v_1.Args[0] 8856 mem := v.Args[2] 8857 v.reset(OpAMD64MOVSSloadidx4) 8858 v.AuxInt = c 8859 v.Aux = sym 8860 v.AddArg(ptr) 8861 v.AddArg(idx) 8862 v.AddArg(mem) 8863 return true 8864 } 8865 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8866 // cond: 8867 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 8868 for { 8869 c := v.AuxInt 8870 sym := v.Aux 8871 v_0 := v.Args[0] 8872 if v_0.Op != OpAMD64ADDQconst { 8873 break 8874 } 8875 d := v_0.AuxInt 8876 ptr := v_0.Args[0] 8877 idx := v.Args[1] 8878 mem := v.Args[2] 8879 v.reset(OpAMD64MOVSSloadidx1) 8880 v.AuxInt = c + d 8881 v.Aux = sym 8882 v.AddArg(ptr) 8883 v.AddArg(idx) 8884 v.AddArg(mem) 8885 return true 8886 } 8887 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8888 // cond: 8889 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 8890 for { 8891 c := v.AuxInt 8892 sym := v.Aux 8893 ptr := v.Args[0] 8894 v_1 := v.Args[1] 8895 if v_1.Op != OpAMD64ADDQconst { 8896 break 8897 } 8898 d := v_1.AuxInt 8899 idx := v_1.Args[0] 8900 mem := v.Args[2] 8901 v.reset(OpAMD64MOVSSloadidx1) 8902 v.AuxInt = c + d 8903 v.Aux = sym 8904 v.AddArg(ptr) 8905 v.AddArg(idx) 8906 v.AddArg(mem) 8907 return true 8908 } 8909 return false 8910 } 8911 func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool { 8912 b := v.Block 8913 _ = b 8914 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 8915 // cond: 8916 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 8917 for { 8918 c := v.AuxInt 8919 sym := v.Aux 8920 v_0 := v.Args[0] 8921 if v_0.Op != OpAMD64ADDQconst { 8922 break 8923 } 8924 d := v_0.AuxInt 8925 ptr := v_0.Args[0] 8926 idx := v.Args[1] 8927 mem := v.Args[2] 8928 v.reset(OpAMD64MOVSSloadidx4) 8929 v.AuxInt = c + d 8930 v.Aux = sym 8931 v.AddArg(ptr) 8932 v.AddArg(idx) 8933 v.AddArg(mem) 8934 return true 8935 } 8936 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 8937 // cond: 8938 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 8939 for { 8940 c := v.AuxInt 8941 sym := v.Aux 8942 ptr := v.Args[0] 8943 v_1 := v.Args[1] 8944 if v_1.Op != OpAMD64ADDQconst { 8945 break 8946 } 8947 d := v_1.AuxInt 8948 idx := v_1.Args[0] 8949 mem := v.Args[2] 8950 v.reset(OpAMD64MOVSSloadidx4) 8951 v.AuxInt = c + 4*d 8952 v.Aux = sym 8953 v.AddArg(ptr) 8954 v.AddArg(idx) 8955 v.AddArg(mem) 8956 return true 8957 } 8958 return false 8959 } 8960 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool { 8961 b := v.Block 8962 _ = b 8963 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8964 // cond: is32Bit(off1+off2) 8965 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 8966 for { 8967 off1 := v.AuxInt 8968 sym := v.Aux 8969 v_0 := v.Args[0] 8970 if v_0.Op != OpAMD64ADDQconst { 8971 break 8972 } 8973 off2 := v_0.AuxInt 8974 ptr := v_0.Args[0] 8975 val := v.Args[1] 8976 mem := v.Args[2] 8977 if !(is32Bit(off1 + off2)) { 8978 break 8979 } 8980 v.reset(OpAMD64MOVSSstore) 8981 v.AuxInt = off1 + off2 8982 v.Aux = sym 8983 v.AddArg(ptr) 8984 v.AddArg(val) 8985 v.AddArg(mem) 8986 return true 8987 } 8988 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8989 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8990 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8991 for { 8992 off1 := v.AuxInt 8993 sym1 := v.Aux 8994 v_0 := v.Args[0] 8995 if v_0.Op != OpAMD64LEAQ { 8996 break 8997 } 8998 off2 := v_0.AuxInt 8999 sym2 := v_0.Aux 9000 base := v_0.Args[0] 9001 val := v.Args[1] 9002 mem := v.Args[2] 9003 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9004 break 9005 } 9006 v.reset(OpAMD64MOVSSstore) 9007 v.AuxInt = off1 + off2 9008 v.Aux = mergeSym(sym1, sym2) 9009 v.AddArg(base) 9010 v.AddArg(val) 9011 v.AddArg(mem) 9012 return true 9013 } 9014 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9015 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9016 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9017 for { 9018 off1 := v.AuxInt 9019 sym1 := v.Aux 9020 v_0 := v.Args[0] 9021 if v_0.Op != OpAMD64LEAQ1 { 9022 break 9023 } 9024 off2 := v_0.AuxInt 9025 sym2 := v_0.Aux 9026 ptr := v_0.Args[0] 9027 idx := v_0.Args[1] 9028 val := v.Args[1] 9029 mem := v.Args[2] 9030 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9031 break 9032 } 9033 v.reset(OpAMD64MOVSSstoreidx1) 9034 v.AuxInt = off1 + off2 9035 v.Aux = mergeSym(sym1, sym2) 9036 v.AddArg(ptr) 9037 v.AddArg(idx) 9038 v.AddArg(val) 9039 v.AddArg(mem) 9040 return true 9041 } 9042 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 9043 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9044 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9045 for { 9046 off1 := v.AuxInt 9047 sym1 := v.Aux 9048 v_0 := v.Args[0] 9049 if v_0.Op != OpAMD64LEAQ4 { 9050 break 9051 } 9052 off2 := v_0.AuxInt 9053 sym2 := v_0.Aux 9054 ptr := v_0.Args[0] 9055 idx := v_0.Args[1] 9056 val := v.Args[1] 9057 mem := v.Args[2] 9058 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9059 break 9060 } 9061 v.reset(OpAMD64MOVSSstoreidx4) 9062 v.AuxInt = off1 + off2 9063 v.Aux = mergeSym(sym1, sym2) 9064 v.AddArg(ptr) 9065 v.AddArg(idx) 9066 v.AddArg(val) 9067 v.AddArg(mem) 9068 return true 9069 } 9070 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 9071 // cond: ptr.Op != OpSB 9072 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 9073 for { 9074 off := v.AuxInt 9075 sym := v.Aux 9076 v_0 := v.Args[0] 9077 if v_0.Op != OpAMD64ADDQ { 9078 break 9079 } 9080 ptr := v_0.Args[0] 9081 idx := v_0.Args[1] 9082 val := v.Args[1] 9083 mem := v.Args[2] 9084 if !(ptr.Op != OpSB) { 9085 break 9086 } 9087 v.reset(OpAMD64MOVSSstoreidx1) 9088 v.AuxInt = off 9089 v.Aux = sym 9090 v.AddArg(ptr) 9091 v.AddArg(idx) 9092 v.AddArg(val) 9093 v.AddArg(mem) 9094 return true 9095 } 9096 return false 9097 } 9098 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool { 9099 b := v.Block 9100 _ = b 9101 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 9102 // cond: 9103 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 9104 for { 9105 c := v.AuxInt 9106 sym := v.Aux 9107 ptr := v.Args[0] 9108 v_1 := v.Args[1] 9109 if v_1.Op != OpAMD64SHLQconst { 9110 break 9111 } 9112 if v_1.AuxInt != 2 { 9113 break 9114 } 9115 idx := v_1.Args[0] 9116 val := v.Args[2] 9117 mem := v.Args[3] 9118 v.reset(OpAMD64MOVSSstoreidx4) 9119 v.AuxInt = c 9120 v.Aux = sym 9121 v.AddArg(ptr) 9122 v.AddArg(idx) 9123 v.AddArg(val) 9124 v.AddArg(mem) 9125 return true 9126 } 9127 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9128 // cond: 9129 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 9130 for { 9131 c := v.AuxInt 9132 sym := v.Aux 9133 v_0 := v.Args[0] 9134 if v_0.Op != OpAMD64ADDQconst { 9135 break 9136 } 9137 d := v_0.AuxInt 9138 ptr := v_0.Args[0] 9139 idx := v.Args[1] 9140 val := v.Args[2] 9141 mem := v.Args[3] 9142 v.reset(OpAMD64MOVSSstoreidx1) 9143 v.AuxInt = c + d 9144 v.Aux = sym 9145 v.AddArg(ptr) 9146 v.AddArg(idx) 9147 v.AddArg(val) 9148 v.AddArg(mem) 9149 return true 9150 } 9151 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9152 // cond: 9153 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 9154 for { 9155 c := v.AuxInt 9156 sym := v.Aux 9157 ptr := v.Args[0] 9158 v_1 := v.Args[1] 9159 if v_1.Op != OpAMD64ADDQconst { 9160 break 9161 } 9162 d := v_1.AuxInt 9163 idx := v_1.Args[0] 9164 val := v.Args[2] 9165 mem := v.Args[3] 9166 v.reset(OpAMD64MOVSSstoreidx1) 9167 v.AuxInt = c + d 9168 v.Aux = sym 9169 v.AddArg(ptr) 9170 v.AddArg(idx) 9171 v.AddArg(val) 9172 v.AddArg(mem) 9173 return true 9174 } 9175 return false 9176 } 9177 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool { 9178 b := v.Block 9179 _ = b 9180 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9181 // cond: 9182 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 9183 for { 9184 c := v.AuxInt 9185 sym := v.Aux 9186 v_0 := v.Args[0] 9187 if v_0.Op != OpAMD64ADDQconst { 9188 break 9189 } 9190 d := v_0.AuxInt 9191 ptr := v_0.Args[0] 9192 idx := v.Args[1] 9193 val := v.Args[2] 9194 mem := v.Args[3] 9195 v.reset(OpAMD64MOVSSstoreidx4) 9196 v.AuxInt = c + d 9197 v.Aux = sym 9198 v.AddArg(ptr) 9199 v.AddArg(idx) 9200 v.AddArg(val) 9201 v.AddArg(mem) 9202 return true 9203 } 9204 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9205 // cond: 9206 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 9207 for { 9208 c := v.AuxInt 9209 sym := v.Aux 9210 ptr := v.Args[0] 9211 v_1 := v.Args[1] 9212 if v_1.Op != OpAMD64ADDQconst { 9213 break 9214 } 9215 d := v_1.AuxInt 9216 idx := v_1.Args[0] 9217 val := v.Args[2] 9218 mem := v.Args[3] 9219 v.reset(OpAMD64MOVSSstoreidx4) 9220 v.AuxInt = c + 4*d 9221 v.Aux = sym 9222 v.AddArg(ptr) 9223 v.AddArg(idx) 9224 v.AddArg(val) 9225 v.AddArg(mem) 9226 return true 9227 } 9228 return false 9229 } 9230 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { 9231 b := v.Block 9232 _ = b 9233 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 9234 // cond: x.Uses == 1 && clobber(x) 9235 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 9236 for { 9237 x := v.Args[0] 9238 if x.Op != OpAMD64MOVWload { 9239 break 9240 } 9241 off := x.AuxInt 9242 sym := x.Aux 9243 ptr := x.Args[0] 9244 mem := x.Args[1] 9245 if !(x.Uses == 1 && clobber(x)) { 9246 break 9247 } 9248 b = x.Block 9249 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 9250 v.reset(OpCopy) 9251 v.AddArg(v0) 9252 v0.AuxInt = off 9253 v0.Aux = sym 9254 v0.AddArg(ptr) 9255 v0.AddArg(mem) 9256 return true 9257 } 9258 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 9259 // cond: x.Uses == 1 && clobber(x) 9260 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 9261 for { 9262 x := v.Args[0] 9263 if x.Op != OpAMD64MOVLload { 9264 break 9265 } 9266 off := x.AuxInt 9267 sym := x.Aux 9268 ptr := x.Args[0] 9269 mem := x.Args[1] 9270 if !(x.Uses == 1 && clobber(x)) { 9271 break 9272 } 9273 b = x.Block 9274 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 9275 v.reset(OpCopy) 9276 v.AddArg(v0) 9277 v0.AuxInt = off 9278 v0.Aux = sym 9279 v0.AddArg(ptr) 9280 v0.AddArg(mem) 9281 return true 9282 } 9283 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 9284 // cond: x.Uses == 1 && clobber(x) 9285 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 9286 for { 9287 x := v.Args[0] 9288 if x.Op != OpAMD64MOVQload { 9289 break 9290 } 9291 off := x.AuxInt 9292 sym := x.Aux 9293 ptr := x.Args[0] 9294 mem := x.Args[1] 9295 if !(x.Uses == 1 && clobber(x)) { 9296 break 9297 } 9298 b = x.Block 9299 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) 9300 v.reset(OpCopy) 9301 v.AddArg(v0) 9302 v0.AuxInt = off 9303 v0.Aux = sym 9304 v0.AddArg(ptr) 9305 v0.AddArg(mem) 9306 return true 9307 } 9308 // match: (MOVWQSX (ANDLconst [c] x)) 9309 // cond: c & 0x8000 == 0 9310 // result: (ANDLconst [c & 0x7fff] x) 9311 for { 9312 v_0 := v.Args[0] 9313 if v_0.Op != OpAMD64ANDLconst { 9314 break 9315 } 9316 c := v_0.AuxInt 9317 x := v_0.Args[0] 9318 if !(c&0x8000 == 0) { 9319 break 9320 } 9321 v.reset(OpAMD64ANDLconst) 9322 v.AuxInt = c & 0x7fff 9323 v.AddArg(x) 9324 return true 9325 } 9326 return false 9327 } 9328 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool { 9329 b := v.Block 9330 _ = b 9331 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9332 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9333 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9334 for { 9335 off1 := v.AuxInt 9336 sym1 := v.Aux 9337 v_0 := v.Args[0] 9338 if v_0.Op != OpAMD64LEAQ { 9339 break 9340 } 9341 off2 := v_0.AuxInt 9342 sym2 := v_0.Aux 9343 base := v_0.Args[0] 9344 mem := v.Args[1] 9345 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9346 break 9347 } 9348 v.reset(OpAMD64MOVWQSXload) 9349 v.AuxInt = off1 + off2 9350 v.Aux = mergeSym(sym1, sym2) 9351 v.AddArg(base) 9352 v.AddArg(mem) 9353 return true 9354 } 9355 return false 9356 } 9357 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { 9358 b := v.Block 9359 _ = b 9360 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 9361 // cond: x.Uses == 1 && clobber(x) 9362 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 9363 for { 9364 x := v.Args[0] 9365 if x.Op != OpAMD64MOVWload { 9366 break 9367 } 9368 off := x.AuxInt 9369 sym := x.Aux 9370 ptr := x.Args[0] 9371 mem := x.Args[1] 9372 if !(x.Uses == 1 && clobber(x)) { 9373 break 9374 } 9375 b = x.Block 9376 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 9377 v.reset(OpCopy) 9378 v.AddArg(v0) 9379 v0.AuxInt = off 9380 v0.Aux = sym 9381 v0.AddArg(ptr) 9382 v0.AddArg(mem) 9383 return true 9384 } 9385 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 9386 // cond: x.Uses == 1 && clobber(x) 9387 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 9388 for { 9389 x := v.Args[0] 9390 if x.Op != OpAMD64MOVLload { 9391 break 9392 } 9393 off := x.AuxInt 9394 sym := x.Aux 9395 ptr := x.Args[0] 9396 mem := x.Args[1] 9397 if !(x.Uses == 1 && clobber(x)) { 9398 break 9399 } 9400 b = x.Block 9401 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 9402 v.reset(OpCopy) 9403 v.AddArg(v0) 9404 v0.AuxInt = off 9405 v0.Aux = sym 9406 v0.AddArg(ptr) 9407 v0.AddArg(mem) 9408 return true 9409 } 9410 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 9411 // cond: x.Uses == 1 && clobber(x) 9412 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 9413 for { 9414 x := v.Args[0] 9415 if x.Op != OpAMD64MOVQload { 9416 break 9417 } 9418 off := x.AuxInt 9419 sym := x.Aux 9420 ptr := x.Args[0] 9421 mem := x.Args[1] 9422 if !(x.Uses == 1 && clobber(x)) { 9423 break 9424 } 9425 b = x.Block 9426 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) 9427 v.reset(OpCopy) 9428 v.AddArg(v0) 9429 v0.AuxInt = off 9430 v0.Aux = sym 9431 v0.AddArg(ptr) 9432 v0.AddArg(mem) 9433 return true 9434 } 9435 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 9436 // cond: x.Uses == 1 && clobber(x) 9437 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 9438 for { 9439 x := v.Args[0] 9440 if x.Op != OpAMD64MOVWloadidx1 { 9441 break 9442 } 9443 off := x.AuxInt 9444 sym := x.Aux 9445 ptr := x.Args[0] 9446 idx := x.Args[1] 9447 mem := x.Args[2] 9448 if !(x.Uses == 1 && clobber(x)) { 9449 break 9450 } 9451 b = x.Block 9452 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 9453 v.reset(OpCopy) 9454 v.AddArg(v0) 9455 v0.AuxInt = off 9456 v0.Aux = sym 9457 v0.AddArg(ptr) 9458 v0.AddArg(idx) 9459 v0.AddArg(mem) 9460 return true 9461 } 9462 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 9463 // cond: x.Uses == 1 && clobber(x) 9464 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 9465 for { 9466 x := v.Args[0] 9467 if x.Op != OpAMD64MOVWloadidx2 { 9468 break 9469 } 9470 off := x.AuxInt 9471 sym := x.Aux 9472 ptr := x.Args[0] 9473 idx := x.Args[1] 9474 mem := x.Args[2] 9475 if !(x.Uses == 1 && clobber(x)) { 9476 break 9477 } 9478 b = x.Block 9479 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 9480 v.reset(OpCopy) 9481 v.AddArg(v0) 9482 v0.AuxInt = off 9483 v0.Aux = sym 9484 v0.AddArg(ptr) 9485 v0.AddArg(idx) 9486 v0.AddArg(mem) 9487 return true 9488 } 9489 // match: (MOVWQZX (ANDLconst [c] x)) 9490 // cond: 9491 // result: (ANDLconst [c & 0xffff] x) 9492 for { 9493 v_0 := v.Args[0] 9494 if v_0.Op != OpAMD64ANDLconst { 9495 break 9496 } 9497 c := v_0.AuxInt 9498 x := v_0.Args[0] 9499 v.reset(OpAMD64ANDLconst) 9500 v.AuxInt = c & 0xffff 9501 v.AddArg(x) 9502 return true 9503 } 9504 return false 9505 } 9506 func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool { 9507 b := v.Block 9508 _ = b 9509 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 9510 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 9511 // result: x 9512 for { 9513 off := v.AuxInt 9514 sym := v.Aux 9515 ptr := v.Args[0] 9516 v_1 := v.Args[1] 9517 if v_1.Op != OpAMD64MOVWstore { 9518 break 9519 } 9520 off2 := v_1.AuxInt 9521 sym2 := v_1.Aux 9522 ptr2 := v_1.Args[0] 9523 x := v_1.Args[1] 9524 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 9525 break 9526 } 9527 v.reset(OpCopy) 9528 v.Type = x.Type 9529 v.AddArg(x) 9530 return true 9531 } 9532 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 9533 // cond: is32Bit(off1+off2) 9534 // result: (MOVWload [off1+off2] {sym} ptr mem) 9535 for { 9536 off1 := v.AuxInt 9537 sym := v.Aux 9538 v_0 := v.Args[0] 9539 if v_0.Op != OpAMD64ADDQconst { 9540 break 9541 } 9542 off2 := v_0.AuxInt 9543 ptr := v_0.Args[0] 9544 mem := v.Args[1] 9545 if !(is32Bit(off1 + off2)) { 9546 break 9547 } 9548 v.reset(OpAMD64MOVWload) 9549 v.AuxInt = off1 + off2 9550 v.Aux = sym 9551 v.AddArg(ptr) 9552 v.AddArg(mem) 9553 return true 9554 } 9555 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9556 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9557 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9558 for { 9559 off1 := v.AuxInt 9560 sym1 := v.Aux 9561 v_0 := v.Args[0] 9562 if v_0.Op != OpAMD64LEAQ { 9563 break 9564 } 9565 off2 := v_0.AuxInt 9566 sym2 := v_0.Aux 9567 base := v_0.Args[0] 9568 mem := v.Args[1] 9569 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9570 break 9571 } 9572 v.reset(OpAMD64MOVWload) 9573 v.AuxInt = off1 + off2 9574 v.Aux = mergeSym(sym1, sym2) 9575 v.AddArg(base) 9576 v.AddArg(mem) 9577 return true 9578 } 9579 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9580 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9581 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9582 for { 9583 off1 := v.AuxInt 9584 sym1 := v.Aux 9585 v_0 := v.Args[0] 9586 if v_0.Op != OpAMD64LEAQ1 { 9587 break 9588 } 9589 off2 := v_0.AuxInt 9590 sym2 := v_0.Aux 9591 ptr := v_0.Args[0] 9592 idx := v_0.Args[1] 9593 mem := v.Args[1] 9594 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9595 break 9596 } 9597 v.reset(OpAMD64MOVWloadidx1) 9598 v.AuxInt = off1 + off2 9599 v.Aux = mergeSym(sym1, sym2) 9600 v.AddArg(ptr) 9601 v.AddArg(idx) 9602 v.AddArg(mem) 9603 return true 9604 } 9605 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 9606 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9607 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9608 for { 9609 off1 := v.AuxInt 9610 sym1 := v.Aux 9611 v_0 := v.Args[0] 9612 if v_0.Op != OpAMD64LEAQ2 { 9613 break 9614 } 9615 off2 := v_0.AuxInt 9616 sym2 := v_0.Aux 9617 ptr := v_0.Args[0] 9618 idx := v_0.Args[1] 9619 mem := v.Args[1] 9620 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9621 break 9622 } 9623 v.reset(OpAMD64MOVWloadidx2) 9624 v.AuxInt = off1 + off2 9625 v.Aux = mergeSym(sym1, sym2) 9626 v.AddArg(ptr) 9627 v.AddArg(idx) 9628 v.AddArg(mem) 9629 return true 9630 } 9631 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 9632 // cond: ptr.Op != OpSB 9633 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 9634 for { 9635 off := v.AuxInt 9636 sym := v.Aux 9637 v_0 := v.Args[0] 9638 if v_0.Op != OpAMD64ADDQ { 9639 break 9640 } 9641 ptr := v_0.Args[0] 9642 idx := v_0.Args[1] 9643 mem := v.Args[1] 9644 if !(ptr.Op != OpSB) { 9645 break 9646 } 9647 v.reset(OpAMD64MOVWloadidx1) 9648 v.AuxInt = off 9649 v.Aux = sym 9650 v.AddArg(ptr) 9651 v.AddArg(idx) 9652 v.AddArg(mem) 9653 return true 9654 } 9655 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 9656 // cond: canMergeSym(sym1, sym2) 9657 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9658 for { 9659 off1 := v.AuxInt 9660 sym1 := v.Aux 9661 v_0 := v.Args[0] 9662 if v_0.Op != OpAMD64LEAL { 9663 break 9664 } 9665 off2 := v_0.AuxInt 9666 sym2 := v_0.Aux 9667 base := v_0.Args[0] 9668 mem := v.Args[1] 9669 if !(canMergeSym(sym1, sym2)) { 9670 break 9671 } 9672 v.reset(OpAMD64MOVWload) 9673 v.AuxInt = off1 + off2 9674 v.Aux = mergeSym(sym1, sym2) 9675 v.AddArg(base) 9676 v.AddArg(mem) 9677 return true 9678 } 9679 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 9680 // cond: is32Bit(off1+off2) 9681 // result: (MOVWload [off1+off2] {sym} ptr mem) 9682 for { 9683 off1 := v.AuxInt 9684 sym := v.Aux 9685 v_0 := v.Args[0] 9686 if v_0.Op != OpAMD64ADDLconst { 9687 break 9688 } 9689 off2 := v_0.AuxInt 9690 ptr := v_0.Args[0] 9691 mem := v.Args[1] 9692 if !(is32Bit(off1 + off2)) { 9693 break 9694 } 9695 v.reset(OpAMD64MOVWload) 9696 v.AuxInt = off1 + off2 9697 v.Aux = sym 9698 v.AddArg(ptr) 9699 v.AddArg(mem) 9700 return true 9701 } 9702 return false 9703 } 9704 func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool { 9705 b := v.Block 9706 _ = b 9707 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 9708 // cond: 9709 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 9710 for { 9711 c := v.AuxInt 9712 sym := v.Aux 9713 ptr := v.Args[0] 9714 v_1 := v.Args[1] 9715 if v_1.Op != OpAMD64SHLQconst { 9716 break 9717 } 9718 if v_1.AuxInt != 1 { 9719 break 9720 } 9721 idx := v_1.Args[0] 9722 mem := v.Args[2] 9723 v.reset(OpAMD64MOVWloadidx2) 9724 v.AuxInt = c 9725 v.Aux = sym 9726 v.AddArg(ptr) 9727 v.AddArg(idx) 9728 v.AddArg(mem) 9729 return true 9730 } 9731 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9732 // cond: 9733 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 9734 for { 9735 c := v.AuxInt 9736 sym := v.Aux 9737 v_0 := v.Args[0] 9738 if v_0.Op != OpAMD64ADDQconst { 9739 break 9740 } 9741 d := v_0.AuxInt 9742 ptr := v_0.Args[0] 9743 idx := v.Args[1] 9744 mem := v.Args[2] 9745 v.reset(OpAMD64MOVWloadidx1) 9746 v.AuxInt = c + d 9747 v.Aux = sym 9748 v.AddArg(ptr) 9749 v.AddArg(idx) 9750 v.AddArg(mem) 9751 return true 9752 } 9753 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9754 // cond: 9755 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 9756 for { 9757 c := v.AuxInt 9758 sym := v.Aux 9759 ptr := v.Args[0] 9760 v_1 := v.Args[1] 9761 if v_1.Op != OpAMD64ADDQconst { 9762 break 9763 } 9764 d := v_1.AuxInt 9765 idx := v_1.Args[0] 9766 mem := v.Args[2] 9767 v.reset(OpAMD64MOVWloadidx1) 9768 v.AuxInt = c + d 9769 v.Aux = sym 9770 v.AddArg(ptr) 9771 v.AddArg(idx) 9772 v.AddArg(mem) 9773 return true 9774 } 9775 return false 9776 } 9777 func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool { 9778 b := v.Block 9779 _ = b 9780 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 9781 // cond: 9782 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 9783 for { 9784 c := v.AuxInt 9785 sym := v.Aux 9786 v_0 := v.Args[0] 9787 if v_0.Op != OpAMD64ADDQconst { 9788 break 9789 } 9790 d := v_0.AuxInt 9791 ptr := v_0.Args[0] 9792 idx := v.Args[1] 9793 mem := v.Args[2] 9794 v.reset(OpAMD64MOVWloadidx2) 9795 v.AuxInt = c + d 9796 v.Aux = sym 9797 v.AddArg(ptr) 9798 v.AddArg(idx) 9799 v.AddArg(mem) 9800 return true 9801 } 9802 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 9803 // cond: 9804 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 9805 for { 9806 c := v.AuxInt 9807 sym := v.Aux 9808 ptr := v.Args[0] 9809 v_1 := v.Args[1] 9810 if v_1.Op != OpAMD64ADDQconst { 9811 break 9812 } 9813 d := v_1.AuxInt 9814 idx := v_1.Args[0] 9815 mem := v.Args[2] 9816 v.reset(OpAMD64MOVWloadidx2) 9817 v.AuxInt = c + 2*d 9818 v.Aux = sym 9819 v.AddArg(ptr) 9820 v.AddArg(idx) 9821 v.AddArg(mem) 9822 return true 9823 } 9824 return false 9825 } 9826 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { 9827 b := v.Block 9828 _ = b 9829 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 9830 // cond: 9831 // result: (MOVWstore [off] {sym} ptr x mem) 9832 for { 9833 off := v.AuxInt 9834 sym := v.Aux 9835 ptr := v.Args[0] 9836 v_1 := v.Args[1] 9837 if v_1.Op != OpAMD64MOVWQSX { 9838 break 9839 } 9840 x := v_1.Args[0] 9841 mem := v.Args[2] 9842 v.reset(OpAMD64MOVWstore) 9843 v.AuxInt = off 9844 v.Aux = sym 9845 v.AddArg(ptr) 9846 v.AddArg(x) 9847 v.AddArg(mem) 9848 return true 9849 } 9850 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 9851 // cond: 9852 // result: (MOVWstore [off] {sym} ptr x mem) 9853 for { 9854 off := v.AuxInt 9855 sym := v.Aux 9856 ptr := v.Args[0] 9857 v_1 := v.Args[1] 9858 if v_1.Op != OpAMD64MOVWQZX { 9859 break 9860 } 9861 x := v_1.Args[0] 9862 mem := v.Args[2] 9863 v.reset(OpAMD64MOVWstore) 9864 v.AuxInt = off 9865 v.Aux = sym 9866 v.AddArg(ptr) 9867 v.AddArg(x) 9868 v.AddArg(mem) 9869 return true 9870 } 9871 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9872 // cond: is32Bit(off1+off2) 9873 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 9874 for { 9875 off1 := v.AuxInt 9876 sym := v.Aux 9877 v_0 := v.Args[0] 9878 if v_0.Op != OpAMD64ADDQconst { 9879 break 9880 } 9881 off2 := v_0.AuxInt 9882 ptr := v_0.Args[0] 9883 val := v.Args[1] 9884 mem := v.Args[2] 9885 if !(is32Bit(off1 + off2)) { 9886 break 9887 } 9888 v.reset(OpAMD64MOVWstore) 9889 v.AuxInt = off1 + off2 9890 v.Aux = sym 9891 v.AddArg(ptr) 9892 v.AddArg(val) 9893 v.AddArg(mem) 9894 return true 9895 } 9896 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 9897 // cond: validOff(off) 9898 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 9899 for { 9900 off := v.AuxInt 9901 sym := v.Aux 9902 ptr := v.Args[0] 9903 v_1 := v.Args[1] 9904 if v_1.Op != OpAMD64MOVLconst { 9905 break 9906 } 9907 c := v_1.AuxInt 9908 mem := v.Args[2] 9909 if !(validOff(off)) { 9910 break 9911 } 9912 v.reset(OpAMD64MOVWstoreconst) 9913 v.AuxInt = makeValAndOff(int64(int16(c)), off) 9914 v.Aux = sym 9915 v.AddArg(ptr) 9916 v.AddArg(mem) 9917 return true 9918 } 9919 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9920 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9921 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9922 for { 9923 off1 := v.AuxInt 9924 sym1 := v.Aux 9925 v_0 := v.Args[0] 9926 if v_0.Op != OpAMD64LEAQ { 9927 break 9928 } 9929 off2 := v_0.AuxInt 9930 sym2 := v_0.Aux 9931 base := v_0.Args[0] 9932 val := v.Args[1] 9933 mem := v.Args[2] 9934 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9935 break 9936 } 9937 v.reset(OpAMD64MOVWstore) 9938 v.AuxInt = off1 + off2 9939 v.Aux = mergeSym(sym1, sym2) 9940 v.AddArg(base) 9941 v.AddArg(val) 9942 v.AddArg(mem) 9943 return true 9944 } 9945 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9946 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9947 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9948 for { 9949 off1 := v.AuxInt 9950 sym1 := v.Aux 9951 v_0 := v.Args[0] 9952 if v_0.Op != OpAMD64LEAQ1 { 9953 break 9954 } 9955 off2 := v_0.AuxInt 9956 sym2 := v_0.Aux 9957 ptr := v_0.Args[0] 9958 idx := v_0.Args[1] 9959 val := v.Args[1] 9960 mem := v.Args[2] 9961 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9962 break 9963 } 9964 v.reset(OpAMD64MOVWstoreidx1) 9965 v.AuxInt = off1 + off2 9966 v.Aux = mergeSym(sym1, sym2) 9967 v.AddArg(ptr) 9968 v.AddArg(idx) 9969 v.AddArg(val) 9970 v.AddArg(mem) 9971 return true 9972 } 9973 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 9974 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9975 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9976 for { 9977 off1 := v.AuxInt 9978 sym1 := v.Aux 9979 v_0 := v.Args[0] 9980 if v_0.Op != OpAMD64LEAQ2 { 9981 break 9982 } 9983 off2 := v_0.AuxInt 9984 sym2 := v_0.Aux 9985 ptr := v_0.Args[0] 9986 idx := v_0.Args[1] 9987 val := v.Args[1] 9988 mem := v.Args[2] 9989 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9990 break 9991 } 9992 v.reset(OpAMD64MOVWstoreidx2) 9993 v.AuxInt = off1 + off2 9994 v.Aux = mergeSym(sym1, sym2) 9995 v.AddArg(ptr) 9996 v.AddArg(idx) 9997 v.AddArg(val) 9998 v.AddArg(mem) 9999 return true 10000 } 10001 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 10002 // cond: ptr.Op != OpSB 10003 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 10004 for { 10005 off := v.AuxInt 10006 sym := v.Aux 10007 v_0 := v.Args[0] 10008 if v_0.Op != OpAMD64ADDQ { 10009 break 10010 } 10011 ptr := v_0.Args[0] 10012 idx := v_0.Args[1] 10013 val := v.Args[1] 10014 mem := v.Args[2] 10015 if !(ptr.Op != OpSB) { 10016 break 10017 } 10018 v.reset(OpAMD64MOVWstoreidx1) 10019 v.AuxInt = off 10020 v.Aux = sym 10021 v.AddArg(ptr) 10022 v.AddArg(idx) 10023 v.AddArg(val) 10024 v.AddArg(mem) 10025 return true 10026 } 10027 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 10028 // cond: x.Uses == 1 && clobber(x) 10029 // result: (MOVLstore [i-2] {s} p w mem) 10030 for { 10031 i := v.AuxInt 10032 s := v.Aux 10033 p := v.Args[0] 10034 v_1 := v.Args[1] 10035 if v_1.Op != OpAMD64SHRQconst { 10036 break 10037 } 10038 if v_1.AuxInt != 16 { 10039 break 10040 } 10041 w := v_1.Args[0] 10042 x := v.Args[2] 10043 if x.Op != OpAMD64MOVWstore { 10044 break 10045 } 10046 if x.AuxInt != i-2 { 10047 break 10048 } 10049 if x.Aux != s { 10050 break 10051 } 10052 if p != x.Args[0] { 10053 break 10054 } 10055 if w != x.Args[1] { 10056 break 10057 } 10058 mem := x.Args[2] 10059 if !(x.Uses == 1 && clobber(x)) { 10060 break 10061 } 10062 v.reset(OpAMD64MOVLstore) 10063 v.AuxInt = i - 2 10064 v.Aux = s 10065 v.AddArg(p) 10066 v.AddArg(w) 10067 v.AddArg(mem) 10068 return true 10069 } 10070 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 10071 // cond: x.Uses == 1 && clobber(x) 10072 // result: (MOVLstore [i-2] {s} p w0 mem) 10073 for { 10074 i := v.AuxInt 10075 s := v.Aux 10076 p := v.Args[0] 10077 v_1 := v.Args[1] 10078 if v_1.Op != OpAMD64SHRQconst { 10079 break 10080 } 10081 j := v_1.AuxInt 10082 w := v_1.Args[0] 10083 x := v.Args[2] 10084 if x.Op != OpAMD64MOVWstore { 10085 break 10086 } 10087 if x.AuxInt != i-2 { 10088 break 10089 } 10090 if x.Aux != s { 10091 break 10092 } 10093 if p != x.Args[0] { 10094 break 10095 } 10096 w0 := x.Args[1] 10097 if w0.Op != OpAMD64SHRQconst { 10098 break 10099 } 10100 if w0.AuxInt != j-16 { 10101 break 10102 } 10103 if w != w0.Args[0] { 10104 break 10105 } 10106 mem := x.Args[2] 10107 if !(x.Uses == 1 && clobber(x)) { 10108 break 10109 } 10110 v.reset(OpAMD64MOVLstore) 10111 v.AuxInt = i - 2 10112 v.Aux = s 10113 v.AddArg(p) 10114 v.AddArg(w0) 10115 v.AddArg(mem) 10116 return true 10117 } 10118 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 10119 // cond: canMergeSym(sym1, sym2) 10120 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10121 for { 10122 off1 := v.AuxInt 10123 sym1 := v.Aux 10124 v_0 := v.Args[0] 10125 if v_0.Op != OpAMD64LEAL { 10126 break 10127 } 10128 off2 := v_0.AuxInt 10129 sym2 := v_0.Aux 10130 base := v_0.Args[0] 10131 val := v.Args[1] 10132 mem := v.Args[2] 10133 if !(canMergeSym(sym1, sym2)) { 10134 break 10135 } 10136 v.reset(OpAMD64MOVWstore) 10137 v.AuxInt = off1 + off2 10138 v.Aux = mergeSym(sym1, sym2) 10139 v.AddArg(base) 10140 v.AddArg(val) 10141 v.AddArg(mem) 10142 return true 10143 } 10144 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 10145 // cond: is32Bit(off1+off2) 10146 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 10147 for { 10148 off1 := v.AuxInt 10149 sym := v.Aux 10150 v_0 := v.Args[0] 10151 if v_0.Op != OpAMD64ADDLconst { 10152 break 10153 } 10154 off2 := v_0.AuxInt 10155 ptr := v_0.Args[0] 10156 val := v.Args[1] 10157 mem := v.Args[2] 10158 if !(is32Bit(off1 + off2)) { 10159 break 10160 } 10161 v.reset(OpAMD64MOVWstore) 10162 v.AuxInt = off1 + off2 10163 v.Aux = sym 10164 v.AddArg(ptr) 10165 v.AddArg(val) 10166 v.AddArg(mem) 10167 return true 10168 } 10169 return false 10170 } 10171 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { 10172 b := v.Block 10173 _ = b 10174 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 10175 // cond: ValAndOff(sc).canAdd(off) 10176 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 10177 for { 10178 sc := v.AuxInt 10179 s := v.Aux 10180 v_0 := v.Args[0] 10181 if v_0.Op != OpAMD64ADDQconst { 10182 break 10183 } 10184 off := v_0.AuxInt 10185 ptr := v_0.Args[0] 10186 mem := v.Args[1] 10187 if !(ValAndOff(sc).canAdd(off)) { 10188 break 10189 } 10190 v.reset(OpAMD64MOVWstoreconst) 10191 v.AuxInt = ValAndOff(sc).add(off) 10192 v.Aux = s 10193 v.AddArg(ptr) 10194 v.AddArg(mem) 10195 return true 10196 } 10197 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 10198 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 10199 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 10200 for { 10201 sc := v.AuxInt 10202 sym1 := v.Aux 10203 v_0 := v.Args[0] 10204 if v_0.Op != OpAMD64LEAQ { 10205 break 10206 } 10207 off := v_0.AuxInt 10208 sym2 := v_0.Aux 10209 ptr := v_0.Args[0] 10210 mem := v.Args[1] 10211 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 10212 break 10213 } 10214 v.reset(OpAMD64MOVWstoreconst) 10215 v.AuxInt = ValAndOff(sc).add(off) 10216 v.Aux = mergeSym(sym1, sym2) 10217 v.AddArg(ptr) 10218 v.AddArg(mem) 10219 return true 10220 } 10221 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 10222 // cond: canMergeSym(sym1, sym2) 10223 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 10224 for { 10225 x := v.AuxInt 10226 sym1 := v.Aux 10227 v_0 := v.Args[0] 10228 if v_0.Op != OpAMD64LEAQ1 { 10229 break 10230 } 10231 off := v_0.AuxInt 10232 sym2 := v_0.Aux 10233 ptr := v_0.Args[0] 10234 idx := v_0.Args[1] 10235 mem := v.Args[1] 10236 if !(canMergeSym(sym1, sym2)) { 10237 break 10238 } 10239 v.reset(OpAMD64MOVWstoreconstidx1) 10240 v.AuxInt = ValAndOff(x).add(off) 10241 v.Aux = mergeSym(sym1, sym2) 10242 v.AddArg(ptr) 10243 v.AddArg(idx) 10244 v.AddArg(mem) 10245 return true 10246 } 10247 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 10248 // cond: canMergeSym(sym1, sym2) 10249 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 10250 for { 10251 x := v.AuxInt 10252 sym1 := v.Aux 10253 v_0 := v.Args[0] 10254 if v_0.Op != OpAMD64LEAQ2 { 10255 break 10256 } 10257 off := v_0.AuxInt 10258 sym2 := v_0.Aux 10259 ptr := v_0.Args[0] 10260 idx := v_0.Args[1] 10261 mem := v.Args[1] 10262 if !(canMergeSym(sym1, sym2)) { 10263 break 10264 } 10265 v.reset(OpAMD64MOVWstoreconstidx2) 10266 v.AuxInt = ValAndOff(x).add(off) 10267 v.Aux = mergeSym(sym1, sym2) 10268 v.AddArg(ptr) 10269 v.AddArg(idx) 10270 v.AddArg(mem) 10271 return true 10272 } 10273 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 10274 // cond: 10275 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 10276 for { 10277 x := v.AuxInt 10278 sym := v.Aux 10279 v_0 := v.Args[0] 10280 if v_0.Op != OpAMD64ADDQ { 10281 break 10282 } 10283 ptr := v_0.Args[0] 10284 idx := v_0.Args[1] 10285 mem := v.Args[1] 10286 v.reset(OpAMD64MOVWstoreconstidx1) 10287 v.AuxInt = x 10288 v.Aux = sym 10289 v.AddArg(ptr) 10290 v.AddArg(idx) 10291 v.AddArg(mem) 10292 return true 10293 } 10294 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 10295 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 10296 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 10297 for { 10298 c := v.AuxInt 10299 s := v.Aux 10300 p := v.Args[0] 10301 x := v.Args[1] 10302 if x.Op != OpAMD64MOVWstoreconst { 10303 break 10304 } 10305 a := x.AuxInt 10306 if x.Aux != s { 10307 break 10308 } 10309 if p != x.Args[0] { 10310 break 10311 } 10312 mem := x.Args[1] 10313 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 10314 break 10315 } 10316 v.reset(OpAMD64MOVLstoreconst) 10317 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 10318 v.Aux = s 10319 v.AddArg(p) 10320 v.AddArg(mem) 10321 return true 10322 } 10323 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 10324 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 10325 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 10326 for { 10327 sc := v.AuxInt 10328 sym1 := v.Aux 10329 v_0 := v.Args[0] 10330 if v_0.Op != OpAMD64LEAL { 10331 break 10332 } 10333 off := v_0.AuxInt 10334 sym2 := v_0.Aux 10335 ptr := v_0.Args[0] 10336 mem := v.Args[1] 10337 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 10338 break 10339 } 10340 v.reset(OpAMD64MOVWstoreconst) 10341 v.AuxInt = ValAndOff(sc).add(off) 10342 v.Aux = mergeSym(sym1, sym2) 10343 v.AddArg(ptr) 10344 v.AddArg(mem) 10345 return true 10346 } 10347 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 10348 // cond: ValAndOff(sc).canAdd(off) 10349 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 10350 for { 10351 sc := v.AuxInt 10352 s := v.Aux 10353 v_0 := v.Args[0] 10354 if v_0.Op != OpAMD64ADDLconst { 10355 break 10356 } 10357 off := v_0.AuxInt 10358 ptr := v_0.Args[0] 10359 mem := v.Args[1] 10360 if !(ValAndOff(sc).canAdd(off)) { 10361 break 10362 } 10363 v.reset(OpAMD64MOVWstoreconst) 10364 v.AuxInt = ValAndOff(sc).add(off) 10365 v.Aux = s 10366 v.AddArg(ptr) 10367 v.AddArg(mem) 10368 return true 10369 } 10370 return false 10371 } 10372 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool { 10373 b := v.Block 10374 _ = b 10375 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 10376 // cond: 10377 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 10378 for { 10379 c := v.AuxInt 10380 sym := v.Aux 10381 ptr := v.Args[0] 10382 v_1 := v.Args[1] 10383 if v_1.Op != OpAMD64SHLQconst { 10384 break 10385 } 10386 if v_1.AuxInt != 1 { 10387 break 10388 } 10389 idx := v_1.Args[0] 10390 mem := v.Args[2] 10391 v.reset(OpAMD64MOVWstoreconstidx2) 10392 v.AuxInt = c 10393 v.Aux = sym 10394 v.AddArg(ptr) 10395 v.AddArg(idx) 10396 v.AddArg(mem) 10397 return true 10398 } 10399 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 10400 // cond: 10401 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10402 for { 10403 x := v.AuxInt 10404 sym := v.Aux 10405 v_0 := v.Args[0] 10406 if v_0.Op != OpAMD64ADDQconst { 10407 break 10408 } 10409 c := v_0.AuxInt 10410 ptr := v_0.Args[0] 10411 idx := v.Args[1] 10412 mem := v.Args[2] 10413 v.reset(OpAMD64MOVWstoreconstidx1) 10414 v.AuxInt = ValAndOff(x).add(c) 10415 v.Aux = sym 10416 v.AddArg(ptr) 10417 v.AddArg(idx) 10418 v.AddArg(mem) 10419 return true 10420 } 10421 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 10422 // cond: 10423 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10424 for { 10425 x := v.AuxInt 10426 sym := v.Aux 10427 ptr := v.Args[0] 10428 v_1 := v.Args[1] 10429 if v_1.Op != OpAMD64ADDQconst { 10430 break 10431 } 10432 c := v_1.AuxInt 10433 idx := v_1.Args[0] 10434 mem := v.Args[2] 10435 v.reset(OpAMD64MOVWstoreconstidx1) 10436 v.AuxInt = ValAndOff(x).add(c) 10437 v.Aux = sym 10438 v.AddArg(ptr) 10439 v.AddArg(idx) 10440 v.AddArg(mem) 10441 return true 10442 } 10443 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 10444 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 10445 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 10446 for { 10447 c := v.AuxInt 10448 s := v.Aux 10449 p := v.Args[0] 10450 i := v.Args[1] 10451 x := v.Args[2] 10452 if x.Op != OpAMD64MOVWstoreconstidx1 { 10453 break 10454 } 10455 a := x.AuxInt 10456 if x.Aux != s { 10457 break 10458 } 10459 if p != x.Args[0] { 10460 break 10461 } 10462 if i != x.Args[1] { 10463 break 10464 } 10465 mem := x.Args[2] 10466 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 10467 break 10468 } 10469 v.reset(OpAMD64MOVLstoreconstidx1) 10470 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 10471 v.Aux = s 10472 v.AddArg(p) 10473 v.AddArg(i) 10474 v.AddArg(mem) 10475 return true 10476 } 10477 return false 10478 } 10479 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool { 10480 b := v.Block 10481 _ = b 10482 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 10483 // cond: 10484 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 10485 for { 10486 x := v.AuxInt 10487 sym := v.Aux 10488 v_0 := v.Args[0] 10489 if v_0.Op != OpAMD64ADDQconst { 10490 break 10491 } 10492 c := v_0.AuxInt 10493 ptr := v_0.Args[0] 10494 idx := v.Args[1] 10495 mem := v.Args[2] 10496 v.reset(OpAMD64MOVWstoreconstidx2) 10497 v.AuxInt = ValAndOff(x).add(c) 10498 v.Aux = sym 10499 v.AddArg(ptr) 10500 v.AddArg(idx) 10501 v.AddArg(mem) 10502 return true 10503 } 10504 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 10505 // cond: 10506 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 10507 for { 10508 x := v.AuxInt 10509 sym := v.Aux 10510 ptr := v.Args[0] 10511 v_1 := v.Args[1] 10512 if v_1.Op != OpAMD64ADDQconst { 10513 break 10514 } 10515 c := v_1.AuxInt 10516 idx := v_1.Args[0] 10517 mem := v.Args[2] 10518 v.reset(OpAMD64MOVWstoreconstidx2) 10519 v.AuxInt = ValAndOff(x).add(2 * c) 10520 v.Aux = sym 10521 v.AddArg(ptr) 10522 v.AddArg(idx) 10523 v.AddArg(mem) 10524 return true 10525 } 10526 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 10527 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 10528 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 10529 for { 10530 c := v.AuxInt 10531 s := v.Aux 10532 p := v.Args[0] 10533 i := v.Args[1] 10534 x := v.Args[2] 10535 if x.Op != OpAMD64MOVWstoreconstidx2 { 10536 break 10537 } 10538 a := x.AuxInt 10539 if x.Aux != s { 10540 break 10541 } 10542 if p != x.Args[0] { 10543 break 10544 } 10545 if i != x.Args[1] { 10546 break 10547 } 10548 mem := x.Args[2] 10549 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 10550 break 10551 } 10552 v.reset(OpAMD64MOVLstoreconstidx1) 10553 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 10554 v.Aux = s 10555 v.AddArg(p) 10556 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 10557 v0.AuxInt = 1 10558 v0.AddArg(i) 10559 v.AddArg(v0) 10560 v.AddArg(mem) 10561 return true 10562 } 10563 return false 10564 } 10565 func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool { 10566 b := v.Block 10567 _ = b 10568 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 10569 // cond: 10570 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 10571 for { 10572 c := v.AuxInt 10573 sym := v.Aux 10574 ptr := v.Args[0] 10575 v_1 := v.Args[1] 10576 if v_1.Op != OpAMD64SHLQconst { 10577 break 10578 } 10579 if v_1.AuxInt != 1 { 10580 break 10581 } 10582 idx := v_1.Args[0] 10583 val := v.Args[2] 10584 mem := v.Args[3] 10585 v.reset(OpAMD64MOVWstoreidx2) 10586 v.AuxInt = c 10587 v.Aux = sym 10588 v.AddArg(ptr) 10589 v.AddArg(idx) 10590 v.AddArg(val) 10591 v.AddArg(mem) 10592 return true 10593 } 10594 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10595 // cond: 10596 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 10597 for { 10598 c := v.AuxInt 10599 sym := v.Aux 10600 v_0 := v.Args[0] 10601 if v_0.Op != OpAMD64ADDQconst { 10602 break 10603 } 10604 d := v_0.AuxInt 10605 ptr := v_0.Args[0] 10606 idx := v.Args[1] 10607 val := v.Args[2] 10608 mem := v.Args[3] 10609 v.reset(OpAMD64MOVWstoreidx1) 10610 v.AuxInt = c + d 10611 v.Aux = sym 10612 v.AddArg(ptr) 10613 v.AddArg(idx) 10614 v.AddArg(val) 10615 v.AddArg(mem) 10616 return true 10617 } 10618 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10619 // cond: 10620 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 10621 for { 10622 c := v.AuxInt 10623 sym := v.Aux 10624 ptr := v.Args[0] 10625 v_1 := v.Args[1] 10626 if v_1.Op != OpAMD64ADDQconst { 10627 break 10628 } 10629 d := v_1.AuxInt 10630 idx := v_1.Args[0] 10631 val := v.Args[2] 10632 mem := v.Args[3] 10633 v.reset(OpAMD64MOVWstoreidx1) 10634 v.AuxInt = c + d 10635 v.Aux = sym 10636 v.AddArg(ptr) 10637 v.AddArg(idx) 10638 v.AddArg(val) 10639 v.AddArg(mem) 10640 return true 10641 } 10642 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 10643 // cond: x.Uses == 1 && clobber(x) 10644 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 10645 for { 10646 i := v.AuxInt 10647 s := v.Aux 10648 p := v.Args[0] 10649 idx := v.Args[1] 10650 v_2 := v.Args[2] 10651 if v_2.Op != OpAMD64SHRQconst { 10652 break 10653 } 10654 if v_2.AuxInt != 16 { 10655 break 10656 } 10657 w := v_2.Args[0] 10658 x := v.Args[3] 10659 if x.Op != OpAMD64MOVWstoreidx1 { 10660 break 10661 } 10662 if x.AuxInt != i-2 { 10663 break 10664 } 10665 if x.Aux != s { 10666 break 10667 } 10668 if p != x.Args[0] { 10669 break 10670 } 10671 if idx != x.Args[1] { 10672 break 10673 } 10674 if w != x.Args[2] { 10675 break 10676 } 10677 mem := x.Args[3] 10678 if !(x.Uses == 1 && clobber(x)) { 10679 break 10680 } 10681 v.reset(OpAMD64MOVLstoreidx1) 10682 v.AuxInt = i - 2 10683 v.Aux = s 10684 v.AddArg(p) 10685 v.AddArg(idx) 10686 v.AddArg(w) 10687 v.AddArg(mem) 10688 return true 10689 } 10690 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 10691 // cond: x.Uses == 1 && clobber(x) 10692 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 10693 for { 10694 i := v.AuxInt 10695 s := v.Aux 10696 p := v.Args[0] 10697 idx := v.Args[1] 10698 v_2 := v.Args[2] 10699 if v_2.Op != OpAMD64SHRQconst { 10700 break 10701 } 10702 j := v_2.AuxInt 10703 w := v_2.Args[0] 10704 x := v.Args[3] 10705 if x.Op != OpAMD64MOVWstoreidx1 { 10706 break 10707 } 10708 if x.AuxInt != i-2 { 10709 break 10710 } 10711 if x.Aux != s { 10712 break 10713 } 10714 if p != x.Args[0] { 10715 break 10716 } 10717 if idx != x.Args[1] { 10718 break 10719 } 10720 w0 := x.Args[2] 10721 if w0.Op != OpAMD64SHRQconst { 10722 break 10723 } 10724 if w0.AuxInt != j-16 { 10725 break 10726 } 10727 if w != w0.Args[0] { 10728 break 10729 } 10730 mem := x.Args[3] 10731 if !(x.Uses == 1 && clobber(x)) { 10732 break 10733 } 10734 v.reset(OpAMD64MOVLstoreidx1) 10735 v.AuxInt = i - 2 10736 v.Aux = s 10737 v.AddArg(p) 10738 v.AddArg(idx) 10739 v.AddArg(w0) 10740 v.AddArg(mem) 10741 return true 10742 } 10743 return false 10744 } 10745 func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { 10746 b := v.Block 10747 _ = b 10748 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 10749 // cond: 10750 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 10751 for { 10752 c := v.AuxInt 10753 sym := v.Aux 10754 v_0 := v.Args[0] 10755 if v_0.Op != OpAMD64ADDQconst { 10756 break 10757 } 10758 d := v_0.AuxInt 10759 ptr := v_0.Args[0] 10760 idx := v.Args[1] 10761 val := v.Args[2] 10762 mem := v.Args[3] 10763 v.reset(OpAMD64MOVWstoreidx2) 10764 v.AuxInt = c + d 10765 v.Aux = sym 10766 v.AddArg(ptr) 10767 v.AddArg(idx) 10768 v.AddArg(val) 10769 v.AddArg(mem) 10770 return true 10771 } 10772 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 10773 // cond: 10774 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 10775 for { 10776 c := v.AuxInt 10777 sym := v.Aux 10778 ptr := v.Args[0] 10779 v_1 := v.Args[1] 10780 if v_1.Op != OpAMD64ADDQconst { 10781 break 10782 } 10783 d := v_1.AuxInt 10784 idx := v_1.Args[0] 10785 val := v.Args[2] 10786 mem := v.Args[3] 10787 v.reset(OpAMD64MOVWstoreidx2) 10788 v.AuxInt = c + 2*d 10789 v.Aux = sym 10790 v.AddArg(ptr) 10791 v.AddArg(idx) 10792 v.AddArg(val) 10793 v.AddArg(mem) 10794 return true 10795 } 10796 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 10797 // cond: x.Uses == 1 && clobber(x) 10798 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 10799 for { 10800 i := v.AuxInt 10801 s := v.Aux 10802 p := v.Args[0] 10803 idx := v.Args[1] 10804 v_2 := v.Args[2] 10805 if v_2.Op != OpAMD64SHRQconst { 10806 break 10807 } 10808 if v_2.AuxInt != 16 { 10809 break 10810 } 10811 w := v_2.Args[0] 10812 x := v.Args[3] 10813 if x.Op != OpAMD64MOVWstoreidx2 { 10814 break 10815 } 10816 if x.AuxInt != i-2 { 10817 break 10818 } 10819 if x.Aux != s { 10820 break 10821 } 10822 if p != x.Args[0] { 10823 break 10824 } 10825 if idx != x.Args[1] { 10826 break 10827 } 10828 if w != x.Args[2] { 10829 break 10830 } 10831 mem := x.Args[3] 10832 if !(x.Uses == 1 && clobber(x)) { 10833 break 10834 } 10835 v.reset(OpAMD64MOVLstoreidx1) 10836 v.AuxInt = i - 2 10837 v.Aux = s 10838 v.AddArg(p) 10839 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 10840 v0.AuxInt = 1 10841 v0.AddArg(idx) 10842 v.AddArg(v0) 10843 v.AddArg(w) 10844 v.AddArg(mem) 10845 return true 10846 } 10847 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 10848 // cond: x.Uses == 1 && clobber(x) 10849 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 10850 for { 10851 i := v.AuxInt 10852 s := v.Aux 10853 p := v.Args[0] 10854 idx := v.Args[1] 10855 v_2 := v.Args[2] 10856 if v_2.Op != OpAMD64SHRQconst { 10857 break 10858 } 10859 j := v_2.AuxInt 10860 w := v_2.Args[0] 10861 x := v.Args[3] 10862 if x.Op != OpAMD64MOVWstoreidx2 { 10863 break 10864 } 10865 if x.AuxInt != i-2 { 10866 break 10867 } 10868 if x.Aux != s { 10869 break 10870 } 10871 if p != x.Args[0] { 10872 break 10873 } 10874 if idx != x.Args[1] { 10875 break 10876 } 10877 w0 := x.Args[2] 10878 if w0.Op != OpAMD64SHRQconst { 10879 break 10880 } 10881 if w0.AuxInt != j-16 { 10882 break 10883 } 10884 if w != w0.Args[0] { 10885 break 10886 } 10887 mem := x.Args[3] 10888 if !(x.Uses == 1 && clobber(x)) { 10889 break 10890 } 10891 v.reset(OpAMD64MOVLstoreidx1) 10892 v.AuxInt = i - 2 10893 v.Aux = s 10894 v.AddArg(p) 10895 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 10896 v0.AuxInt = 1 10897 v0.AddArg(idx) 10898 v.AddArg(v0) 10899 v.AddArg(w0) 10900 v.AddArg(mem) 10901 return true 10902 } 10903 return false 10904 } 10905 func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { 10906 b := v.Block 10907 _ = b 10908 // match: (MULL x (MOVLconst [c])) 10909 // cond: 10910 // result: (MULLconst [c] x) 10911 for { 10912 x := v.Args[0] 10913 v_1 := v.Args[1] 10914 if v_1.Op != OpAMD64MOVLconst { 10915 break 10916 } 10917 c := v_1.AuxInt 10918 v.reset(OpAMD64MULLconst) 10919 v.AuxInt = c 10920 v.AddArg(x) 10921 return true 10922 } 10923 // match: (MULL (MOVLconst [c]) x) 10924 // cond: 10925 // result: (MULLconst [c] x) 10926 for { 10927 v_0 := v.Args[0] 10928 if v_0.Op != OpAMD64MOVLconst { 10929 break 10930 } 10931 c := v_0.AuxInt 10932 x := v.Args[1] 10933 v.reset(OpAMD64MULLconst) 10934 v.AuxInt = c 10935 v.AddArg(x) 10936 return true 10937 } 10938 return false 10939 } 10940 func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool { 10941 b := v.Block 10942 _ = b 10943 // match: (MULLconst [c] (MULLconst [d] x)) 10944 // cond: 10945 // result: (MULLconst [int64(int32(c * d))] x) 10946 for { 10947 c := v.AuxInt 10948 v_0 := v.Args[0] 10949 if v_0.Op != OpAMD64MULLconst { 10950 break 10951 } 10952 d := v_0.AuxInt 10953 x := v_0.Args[0] 10954 v.reset(OpAMD64MULLconst) 10955 v.AuxInt = int64(int32(c * d)) 10956 v.AddArg(x) 10957 return true 10958 } 10959 // match: (MULLconst [c] (MOVLconst [d])) 10960 // cond: 10961 // result: (MOVLconst [int64(int32(c*d))]) 10962 for { 10963 c := v.AuxInt 10964 v_0 := v.Args[0] 10965 if v_0.Op != OpAMD64MOVLconst { 10966 break 10967 } 10968 d := v_0.AuxInt 10969 v.reset(OpAMD64MOVLconst) 10970 v.AuxInt = int64(int32(c * d)) 10971 return true 10972 } 10973 return false 10974 } 10975 func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool { 10976 b := v.Block 10977 _ = b 10978 // match: (MULQ x (MOVQconst [c])) 10979 // cond: is32Bit(c) 10980 // result: (MULQconst [c] x) 10981 for { 10982 x := v.Args[0] 10983 v_1 := v.Args[1] 10984 if v_1.Op != OpAMD64MOVQconst { 10985 break 10986 } 10987 c := v_1.AuxInt 10988 if !(is32Bit(c)) { 10989 break 10990 } 10991 v.reset(OpAMD64MULQconst) 10992 v.AuxInt = c 10993 v.AddArg(x) 10994 return true 10995 } 10996 // match: (MULQ (MOVQconst [c]) x) 10997 // cond: is32Bit(c) 10998 // result: (MULQconst [c] x) 10999 for { 11000 v_0 := v.Args[0] 11001 if v_0.Op != OpAMD64MOVQconst { 11002 break 11003 } 11004 c := v_0.AuxInt 11005 x := v.Args[1] 11006 if !(is32Bit(c)) { 11007 break 11008 } 11009 v.reset(OpAMD64MULQconst) 11010 v.AuxInt = c 11011 v.AddArg(x) 11012 return true 11013 } 11014 return false 11015 } 11016 func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { 11017 b := v.Block 11018 _ = b 11019 // match: (MULQconst [c] (MULQconst [d] x)) 11020 // cond: is32Bit(c*d) 11021 // result: (MULQconst [c * d] x) 11022 for { 11023 c := v.AuxInt 11024 v_0 := v.Args[0] 11025 if v_0.Op != OpAMD64MULQconst { 11026 break 11027 } 11028 d := v_0.AuxInt 11029 x := v_0.Args[0] 11030 if !(is32Bit(c * d)) { 11031 break 11032 } 11033 v.reset(OpAMD64MULQconst) 11034 v.AuxInt = c * d 11035 v.AddArg(x) 11036 return true 11037 } 11038 // match: (MULQconst [-1] x) 11039 // cond: 11040 // result: (NEGQ x) 11041 for { 11042 if v.AuxInt != -1 { 11043 break 11044 } 11045 x := v.Args[0] 11046 v.reset(OpAMD64NEGQ) 11047 v.AddArg(x) 11048 return true 11049 } 11050 // match: (MULQconst [0] _) 11051 // cond: 11052 // result: (MOVQconst [0]) 11053 for { 11054 if v.AuxInt != 0 { 11055 break 11056 } 11057 v.reset(OpAMD64MOVQconst) 11058 v.AuxInt = 0 11059 return true 11060 } 11061 // match: (MULQconst [1] x) 11062 // cond: 11063 // result: x 11064 for { 11065 if v.AuxInt != 1 { 11066 break 11067 } 11068 x := v.Args[0] 11069 v.reset(OpCopy) 11070 v.Type = x.Type 11071 v.AddArg(x) 11072 return true 11073 } 11074 // match: (MULQconst [3] x) 11075 // cond: 11076 // result: (LEAQ2 x x) 11077 for { 11078 if v.AuxInt != 3 { 11079 break 11080 } 11081 x := v.Args[0] 11082 v.reset(OpAMD64LEAQ2) 11083 v.AddArg(x) 11084 v.AddArg(x) 11085 return true 11086 } 11087 // match: (MULQconst [5] x) 11088 // cond: 11089 // result: (LEAQ4 x x) 11090 for { 11091 if v.AuxInt != 5 { 11092 break 11093 } 11094 x := v.Args[0] 11095 v.reset(OpAMD64LEAQ4) 11096 v.AddArg(x) 11097 v.AddArg(x) 11098 return true 11099 } 11100 // match: (MULQconst [7] x) 11101 // cond: 11102 // result: (LEAQ8 (NEGQ <v.Type> x) x) 11103 for { 11104 if v.AuxInt != 7 { 11105 break 11106 } 11107 x := v.Args[0] 11108 v.reset(OpAMD64LEAQ8) 11109 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) 11110 v0.AddArg(x) 11111 v.AddArg(v0) 11112 v.AddArg(x) 11113 return true 11114 } 11115 // match: (MULQconst [9] x) 11116 // cond: 11117 // result: (LEAQ8 x x) 11118 for { 11119 if v.AuxInt != 9 { 11120 break 11121 } 11122 x := v.Args[0] 11123 v.reset(OpAMD64LEAQ8) 11124 v.AddArg(x) 11125 v.AddArg(x) 11126 return true 11127 } 11128 // match: (MULQconst [11] x) 11129 // cond: 11130 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 11131 for { 11132 if v.AuxInt != 11 { 11133 break 11134 } 11135 x := v.Args[0] 11136 v.reset(OpAMD64LEAQ2) 11137 v.AddArg(x) 11138 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 11139 v0.AddArg(x) 11140 v0.AddArg(x) 11141 v.AddArg(v0) 11142 return true 11143 } 11144 // match: (MULQconst [13] x) 11145 // cond: 11146 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 11147 for { 11148 if v.AuxInt != 13 { 11149 break 11150 } 11151 x := v.Args[0] 11152 v.reset(OpAMD64LEAQ4) 11153 v.AddArg(x) 11154 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 11155 v0.AddArg(x) 11156 v0.AddArg(x) 11157 v.AddArg(v0) 11158 return true 11159 } 11160 // match: (MULQconst [21] x) 11161 // cond: 11162 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 11163 for { 11164 if v.AuxInt != 21 { 11165 break 11166 } 11167 x := v.Args[0] 11168 v.reset(OpAMD64LEAQ4) 11169 v.AddArg(x) 11170 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 11171 v0.AddArg(x) 11172 v0.AddArg(x) 11173 v.AddArg(v0) 11174 return true 11175 } 11176 // match: (MULQconst [25] x) 11177 // cond: 11178 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 11179 for { 11180 if v.AuxInt != 25 { 11181 break 11182 } 11183 x := v.Args[0] 11184 v.reset(OpAMD64LEAQ8) 11185 v.AddArg(x) 11186 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 11187 v0.AddArg(x) 11188 v0.AddArg(x) 11189 v.AddArg(v0) 11190 return true 11191 } 11192 // match: (MULQconst [37] x) 11193 // cond: 11194 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 11195 for { 11196 if v.AuxInt != 37 { 11197 break 11198 } 11199 x := v.Args[0] 11200 v.reset(OpAMD64LEAQ4) 11201 v.AddArg(x) 11202 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 11203 v0.AddArg(x) 11204 v0.AddArg(x) 11205 v.AddArg(v0) 11206 return true 11207 } 11208 // match: (MULQconst [41] x) 11209 // cond: 11210 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 11211 for { 11212 if v.AuxInt != 41 { 11213 break 11214 } 11215 x := v.Args[0] 11216 v.reset(OpAMD64LEAQ8) 11217 v.AddArg(x) 11218 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 11219 v0.AddArg(x) 11220 v0.AddArg(x) 11221 v.AddArg(v0) 11222 return true 11223 } 11224 // match: (MULQconst [73] x) 11225 // cond: 11226 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 11227 for { 11228 if v.AuxInt != 73 { 11229 break 11230 } 11231 x := v.Args[0] 11232 v.reset(OpAMD64LEAQ8) 11233 v.AddArg(x) 11234 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 11235 v0.AddArg(x) 11236 v0.AddArg(x) 11237 v.AddArg(v0) 11238 return true 11239 } 11240 // match: (MULQconst [c] x) 11241 // cond: isPowerOfTwo(c) 11242 // result: (SHLQconst [log2(c)] x) 11243 for { 11244 c := v.AuxInt 11245 x := v.Args[0] 11246 if !(isPowerOfTwo(c)) { 11247 break 11248 } 11249 v.reset(OpAMD64SHLQconst) 11250 v.AuxInt = log2(c) 11251 v.AddArg(x) 11252 return true 11253 } 11254 // match: (MULQconst [c] x) 11255 // cond: isPowerOfTwo(c+1) && c >= 15 11256 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 11257 for { 11258 c := v.AuxInt 11259 x := v.Args[0] 11260 if !(isPowerOfTwo(c+1) && c >= 15) { 11261 break 11262 } 11263 v.reset(OpAMD64SUBQ) 11264 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 11265 v0.AuxInt = log2(c + 1) 11266 v0.AddArg(x) 11267 v.AddArg(v0) 11268 v.AddArg(x) 11269 return true 11270 } 11271 // match: (MULQconst [c] x) 11272 // cond: isPowerOfTwo(c-1) && c >= 17 11273 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 11274 for { 11275 c := v.AuxInt 11276 x := v.Args[0] 11277 if !(isPowerOfTwo(c-1) && c >= 17) { 11278 break 11279 } 11280 v.reset(OpAMD64LEAQ1) 11281 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 11282 v0.AuxInt = log2(c - 1) 11283 v0.AddArg(x) 11284 v.AddArg(v0) 11285 v.AddArg(x) 11286 return true 11287 } 11288 // match: (MULQconst [c] x) 11289 // cond: isPowerOfTwo(c-2) && c >= 34 11290 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 11291 for { 11292 c := v.AuxInt 11293 x := v.Args[0] 11294 if !(isPowerOfTwo(c-2) && c >= 34) { 11295 break 11296 } 11297 v.reset(OpAMD64LEAQ2) 11298 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 11299 v0.AuxInt = log2(c - 2) 11300 v0.AddArg(x) 11301 v.AddArg(v0) 11302 v.AddArg(x) 11303 return true 11304 } 11305 // match: (MULQconst [c] x) 11306 // cond: isPowerOfTwo(c-4) && c >= 68 11307 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 11308 for { 11309 c := v.AuxInt 11310 x := v.Args[0] 11311 if !(isPowerOfTwo(c-4) && c >= 68) { 11312 break 11313 } 11314 v.reset(OpAMD64LEAQ4) 11315 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 11316 v0.AuxInt = log2(c - 4) 11317 v0.AddArg(x) 11318 v.AddArg(v0) 11319 v.AddArg(x) 11320 return true 11321 } 11322 // match: (MULQconst [c] x) 11323 // cond: isPowerOfTwo(c-8) && c >= 136 11324 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 11325 for { 11326 c := v.AuxInt 11327 x := v.Args[0] 11328 if !(isPowerOfTwo(c-8) && c >= 136) { 11329 break 11330 } 11331 v.reset(OpAMD64LEAQ8) 11332 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 11333 v0.AuxInt = log2(c - 8) 11334 v0.AddArg(x) 11335 v.AddArg(v0) 11336 v.AddArg(x) 11337 return true 11338 } 11339 // match: (MULQconst [c] x) 11340 // cond: c%3 == 0 && isPowerOfTwo(c/3) 11341 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 11342 for { 11343 c := v.AuxInt 11344 x := v.Args[0] 11345 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 11346 break 11347 } 11348 v.reset(OpAMD64SHLQconst) 11349 v.AuxInt = log2(c / 3) 11350 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 11351 v0.AddArg(x) 11352 v0.AddArg(x) 11353 v.AddArg(v0) 11354 return true 11355 } 11356 // match: (MULQconst [c] x) 11357 // cond: c%5 == 0 && isPowerOfTwo(c/5) 11358 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 11359 for { 11360 c := v.AuxInt 11361 x := v.Args[0] 11362 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 11363 break 11364 } 11365 v.reset(OpAMD64SHLQconst) 11366 v.AuxInt = log2(c / 5) 11367 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 11368 v0.AddArg(x) 11369 v0.AddArg(x) 11370 v.AddArg(v0) 11371 return true 11372 } 11373 // match: (MULQconst [c] x) 11374 // cond: c%9 == 0 && isPowerOfTwo(c/9) 11375 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 11376 for { 11377 c := v.AuxInt 11378 x := v.Args[0] 11379 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 11380 break 11381 } 11382 v.reset(OpAMD64SHLQconst) 11383 v.AuxInt = log2(c / 9) 11384 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 11385 v0.AddArg(x) 11386 v0.AddArg(x) 11387 v.AddArg(v0) 11388 return true 11389 } 11390 // match: (MULQconst [c] (MOVQconst [d])) 11391 // cond: 11392 // result: (MOVQconst [c*d]) 11393 for { 11394 c := v.AuxInt 11395 v_0 := v.Args[0] 11396 if v_0.Op != OpAMD64MOVQconst { 11397 break 11398 } 11399 d := v_0.AuxInt 11400 v.reset(OpAMD64MOVQconst) 11401 v.AuxInt = c * d 11402 return true 11403 } 11404 return false 11405 } 11406 func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool { 11407 b := v.Block 11408 _ = b 11409 // match: (NEGL (MOVLconst [c])) 11410 // cond: 11411 // result: (MOVLconst [int64(int32(-c))]) 11412 for { 11413 v_0 := v.Args[0] 11414 if v_0.Op != OpAMD64MOVLconst { 11415 break 11416 } 11417 c := v_0.AuxInt 11418 v.reset(OpAMD64MOVLconst) 11419 v.AuxInt = int64(int32(-c)) 11420 return true 11421 } 11422 return false 11423 } 11424 func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool { 11425 b := v.Block 11426 _ = b 11427 // match: (NEGQ (MOVQconst [c])) 11428 // cond: 11429 // result: (MOVQconst [-c]) 11430 for { 11431 v_0 := v.Args[0] 11432 if v_0.Op != OpAMD64MOVQconst { 11433 break 11434 } 11435 c := v_0.AuxInt 11436 v.reset(OpAMD64MOVQconst) 11437 v.AuxInt = -c 11438 return true 11439 } 11440 return false 11441 } 11442 func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool { 11443 b := v.Block 11444 _ = b 11445 // match: (NOTL (MOVLconst [c])) 11446 // cond: 11447 // result: (MOVLconst [^c]) 11448 for { 11449 v_0 := v.Args[0] 11450 if v_0.Op != OpAMD64MOVLconst { 11451 break 11452 } 11453 c := v_0.AuxInt 11454 v.reset(OpAMD64MOVLconst) 11455 v.AuxInt = ^c 11456 return true 11457 } 11458 return false 11459 } 11460 func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool { 11461 b := v.Block 11462 _ = b 11463 // match: (NOTQ (MOVQconst [c])) 11464 // cond: 11465 // result: (MOVQconst [^c]) 11466 for { 11467 v_0 := v.Args[0] 11468 if v_0.Op != OpAMD64MOVQconst { 11469 break 11470 } 11471 c := v_0.AuxInt 11472 v.reset(OpAMD64MOVQconst) 11473 v.AuxInt = ^c 11474 return true 11475 } 11476 return false 11477 } 11478 func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { 11479 b := v.Block 11480 _ = b 11481 // match: (ORL x (MOVLconst [c])) 11482 // cond: 11483 // result: (ORLconst [c] x) 11484 for { 11485 x := v.Args[0] 11486 v_1 := v.Args[1] 11487 if v_1.Op != OpAMD64MOVLconst { 11488 break 11489 } 11490 c := v_1.AuxInt 11491 v.reset(OpAMD64ORLconst) 11492 v.AuxInt = c 11493 v.AddArg(x) 11494 return true 11495 } 11496 // match: (ORL (MOVLconst [c]) x) 11497 // cond: 11498 // result: (ORLconst [c] x) 11499 for { 11500 v_0 := v.Args[0] 11501 if v_0.Op != OpAMD64MOVLconst { 11502 break 11503 } 11504 c := v_0.AuxInt 11505 x := v.Args[1] 11506 v.reset(OpAMD64ORLconst) 11507 v.AuxInt = c 11508 v.AddArg(x) 11509 return true 11510 } 11511 // match: ( ORL (SHLLconst x [c]) (SHRLconst x [32-c])) 11512 // cond: 11513 // result: (ROLLconst x [ c]) 11514 for { 11515 v_0 := v.Args[0] 11516 if v_0.Op != OpAMD64SHLLconst { 11517 break 11518 } 11519 c := v_0.AuxInt 11520 x := v_0.Args[0] 11521 v_1 := v.Args[1] 11522 if v_1.Op != OpAMD64SHRLconst { 11523 break 11524 } 11525 if v_1.AuxInt != 32-c { 11526 break 11527 } 11528 if x != v_1.Args[0] { 11529 break 11530 } 11531 v.reset(OpAMD64ROLLconst) 11532 v.AuxInt = c 11533 v.AddArg(x) 11534 return true 11535 } 11536 // match: ( ORL (SHRLconst x [c]) (SHLLconst x [32-c])) 11537 // cond: 11538 // result: (ROLLconst x [32-c]) 11539 for { 11540 v_0 := v.Args[0] 11541 if v_0.Op != OpAMD64SHRLconst { 11542 break 11543 } 11544 c := v_0.AuxInt 11545 x := v_0.Args[0] 11546 v_1 := v.Args[1] 11547 if v_1.Op != OpAMD64SHLLconst { 11548 break 11549 } 11550 if v_1.AuxInt != 32-c { 11551 break 11552 } 11553 if x != v_1.Args[0] { 11554 break 11555 } 11556 v.reset(OpAMD64ROLLconst) 11557 v.AuxInt = 32 - c 11558 v.AddArg(x) 11559 return true 11560 } 11561 // match: ( ORL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) 11562 // cond: c < 16 && t.Size() == 2 11563 // result: (ROLWconst x [ c]) 11564 for { 11565 t := v.Type 11566 v_0 := v.Args[0] 11567 if v_0.Op != OpAMD64SHLLconst { 11568 break 11569 } 11570 c := v_0.AuxInt 11571 x := v_0.Args[0] 11572 v_1 := v.Args[1] 11573 if v_1.Op != OpAMD64SHRWconst { 11574 break 11575 } 11576 if v_1.AuxInt != 16-c { 11577 break 11578 } 11579 if x != v_1.Args[0] { 11580 break 11581 } 11582 if !(c < 16 && t.Size() == 2) { 11583 break 11584 } 11585 v.reset(OpAMD64ROLWconst) 11586 v.AuxInt = c 11587 v.AddArg(x) 11588 return true 11589 } 11590 // match: ( ORL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) 11591 // cond: c > 0 && t.Size() == 2 11592 // result: (ROLWconst x [16-c]) 11593 for { 11594 t := v.Type 11595 v_0 := v.Args[0] 11596 if v_0.Op != OpAMD64SHRWconst { 11597 break 11598 } 11599 c := v_0.AuxInt 11600 x := v_0.Args[0] 11601 v_1 := v.Args[1] 11602 if v_1.Op != OpAMD64SHLLconst { 11603 break 11604 } 11605 if v_1.AuxInt != 16-c { 11606 break 11607 } 11608 if x != v_1.Args[0] { 11609 break 11610 } 11611 if !(c > 0 && t.Size() == 2) { 11612 break 11613 } 11614 v.reset(OpAMD64ROLWconst) 11615 v.AuxInt = 16 - c 11616 v.AddArg(x) 11617 return true 11618 } 11619 // match: ( ORL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) 11620 // cond: c < 8 && t.Size() == 1 11621 // result: (ROLBconst x [ c]) 11622 for { 11623 t := v.Type 11624 v_0 := v.Args[0] 11625 if v_0.Op != OpAMD64SHLLconst { 11626 break 11627 } 11628 c := v_0.AuxInt 11629 x := v_0.Args[0] 11630 v_1 := v.Args[1] 11631 if v_1.Op != OpAMD64SHRBconst { 11632 break 11633 } 11634 if v_1.AuxInt != 8-c { 11635 break 11636 } 11637 if x != v_1.Args[0] { 11638 break 11639 } 11640 if !(c < 8 && t.Size() == 1) { 11641 break 11642 } 11643 v.reset(OpAMD64ROLBconst) 11644 v.AuxInt = c 11645 v.AddArg(x) 11646 return true 11647 } 11648 // match: ( ORL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) 11649 // cond: c > 0 && t.Size() == 1 11650 // result: (ROLBconst x [ 8-c]) 11651 for { 11652 t := v.Type 11653 v_0 := v.Args[0] 11654 if v_0.Op != OpAMD64SHRBconst { 11655 break 11656 } 11657 c := v_0.AuxInt 11658 x := v_0.Args[0] 11659 v_1 := v.Args[1] 11660 if v_1.Op != OpAMD64SHLLconst { 11661 break 11662 } 11663 if v_1.AuxInt != 8-c { 11664 break 11665 } 11666 if x != v_1.Args[0] { 11667 break 11668 } 11669 if !(c > 0 && t.Size() == 1) { 11670 break 11671 } 11672 v.reset(OpAMD64ROLBconst) 11673 v.AuxInt = 8 - c 11674 v.AddArg(x) 11675 return true 11676 } 11677 // match: (ORL x x) 11678 // cond: 11679 // result: x 11680 for { 11681 x := v.Args[0] 11682 if x != v.Args[1] { 11683 break 11684 } 11685 v.reset(OpCopy) 11686 v.Type = x.Type 11687 v.AddArg(x) 11688 return true 11689 } 11690 // match: (ORL x:(SHLLconst _) y) 11691 // cond: y.Op != OpAMD64SHLLconst 11692 // result: (ORL y x) 11693 for { 11694 x := v.Args[0] 11695 if x.Op != OpAMD64SHLLconst { 11696 break 11697 } 11698 y := v.Args[1] 11699 if !(y.Op != OpAMD64SHLLconst) { 11700 break 11701 } 11702 v.reset(OpAMD64ORL) 11703 v.AddArg(y) 11704 v.AddArg(x) 11705 return true 11706 } 11707 // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) 11708 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 11709 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) 11710 for { 11711 x0 := v.Args[0] 11712 if x0.Op != OpAMD64MOVBload { 11713 break 11714 } 11715 i := x0.AuxInt 11716 s := x0.Aux 11717 p := x0.Args[0] 11718 mem := x0.Args[1] 11719 s0 := v.Args[1] 11720 if s0.Op != OpAMD64SHLLconst { 11721 break 11722 } 11723 if s0.AuxInt != 8 { 11724 break 11725 } 11726 x1 := s0.Args[0] 11727 if x1.Op != OpAMD64MOVBload { 11728 break 11729 } 11730 if x1.AuxInt != i+1 { 11731 break 11732 } 11733 if x1.Aux != s { 11734 break 11735 } 11736 if p != x1.Args[0] { 11737 break 11738 } 11739 if mem != x1.Args[1] { 11740 break 11741 } 11742 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 11743 break 11744 } 11745 b = mergePoint(b, x0, x1) 11746 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 11747 v.reset(OpCopy) 11748 v.AddArg(v0) 11749 v0.AuxInt = i 11750 v0.Aux = s 11751 v0.AddArg(p) 11752 v0.AddArg(mem) 11753 return true 11754 } 11755 // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem))) 11756 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) 11757 // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem) 11758 for { 11759 o0 := v.Args[0] 11760 if o0.Op != OpAMD64ORL { 11761 break 11762 } 11763 x0 := o0.Args[0] 11764 if x0.Op != OpAMD64MOVWload { 11765 break 11766 } 11767 i := x0.AuxInt 11768 s := x0.Aux 11769 p := x0.Args[0] 11770 mem := x0.Args[1] 11771 s0 := o0.Args[1] 11772 if s0.Op != OpAMD64SHLLconst { 11773 break 11774 } 11775 if s0.AuxInt != 16 { 11776 break 11777 } 11778 x1 := s0.Args[0] 11779 if x1.Op != OpAMD64MOVBload { 11780 break 11781 } 11782 if x1.AuxInt != i+2 { 11783 break 11784 } 11785 if x1.Aux != s { 11786 break 11787 } 11788 if p != x1.Args[0] { 11789 break 11790 } 11791 if mem != x1.Args[1] { 11792 break 11793 } 11794 s1 := v.Args[1] 11795 if s1.Op != OpAMD64SHLLconst { 11796 break 11797 } 11798 if s1.AuxInt != 24 { 11799 break 11800 } 11801 x2 := s1.Args[0] 11802 if x2.Op != OpAMD64MOVBload { 11803 break 11804 } 11805 if x2.AuxInt != i+3 { 11806 break 11807 } 11808 if x2.Aux != s { 11809 break 11810 } 11811 if p != x2.Args[0] { 11812 break 11813 } 11814 if mem != x2.Args[1] { 11815 break 11816 } 11817 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { 11818 break 11819 } 11820 b = mergePoint(b, x0, x1, x2) 11821 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 11822 v.reset(OpCopy) 11823 v.AddArg(v0) 11824 v0.AuxInt = i 11825 v0.Aux = s 11826 v0.AddArg(p) 11827 v0.AddArg(mem) 11828 return true 11829 } 11830 // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) 11831 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 11832 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem) 11833 for { 11834 x0 := v.Args[0] 11835 if x0.Op != OpAMD64MOVBloadidx1 { 11836 break 11837 } 11838 i := x0.AuxInt 11839 s := x0.Aux 11840 p := x0.Args[0] 11841 idx := x0.Args[1] 11842 mem := x0.Args[2] 11843 s0 := v.Args[1] 11844 if s0.Op != OpAMD64SHLLconst { 11845 break 11846 } 11847 if s0.AuxInt != 8 { 11848 break 11849 } 11850 x1 := s0.Args[0] 11851 if x1.Op != OpAMD64MOVBloadidx1 { 11852 break 11853 } 11854 if x1.AuxInt != i+1 { 11855 break 11856 } 11857 if x1.Aux != s { 11858 break 11859 } 11860 if p != x1.Args[0] { 11861 break 11862 } 11863 if idx != x1.Args[1] { 11864 break 11865 } 11866 if mem != x1.Args[2] { 11867 break 11868 } 11869 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 11870 break 11871 } 11872 b = mergePoint(b, x0, x1) 11873 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 11874 v.reset(OpCopy) 11875 v.AddArg(v0) 11876 v0.AuxInt = i 11877 v0.Aux = s 11878 v0.AddArg(p) 11879 v0.AddArg(idx) 11880 v0.AddArg(mem) 11881 return true 11882 } 11883 // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem))) 11884 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) 11885 // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem) 11886 for { 11887 o0 := v.Args[0] 11888 if o0.Op != OpAMD64ORL { 11889 break 11890 } 11891 x0 := o0.Args[0] 11892 if x0.Op != OpAMD64MOVWloadidx1 { 11893 break 11894 } 11895 i := x0.AuxInt 11896 s := x0.Aux 11897 p := x0.Args[0] 11898 idx := x0.Args[1] 11899 mem := x0.Args[2] 11900 s0 := o0.Args[1] 11901 if s0.Op != OpAMD64SHLLconst { 11902 break 11903 } 11904 if s0.AuxInt != 16 { 11905 break 11906 } 11907 x1 := s0.Args[0] 11908 if x1.Op != OpAMD64MOVBloadidx1 { 11909 break 11910 } 11911 if x1.AuxInt != i+2 { 11912 break 11913 } 11914 if x1.Aux != s { 11915 break 11916 } 11917 if p != x1.Args[0] { 11918 break 11919 } 11920 if idx != x1.Args[1] { 11921 break 11922 } 11923 if mem != x1.Args[2] { 11924 break 11925 } 11926 s1 := v.Args[1] 11927 if s1.Op != OpAMD64SHLLconst { 11928 break 11929 } 11930 if s1.AuxInt != 24 { 11931 break 11932 } 11933 x2 := s1.Args[0] 11934 if x2.Op != OpAMD64MOVBloadidx1 { 11935 break 11936 } 11937 if x2.AuxInt != i+3 { 11938 break 11939 } 11940 if x2.Aux != s { 11941 break 11942 } 11943 if p != x2.Args[0] { 11944 break 11945 } 11946 if idx != x2.Args[1] { 11947 break 11948 } 11949 if mem != x2.Args[2] { 11950 break 11951 } 11952 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { 11953 break 11954 } 11955 b = mergePoint(b, x0, x1, x2) 11956 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 11957 v.reset(OpCopy) 11958 v.AddArg(v0) 11959 v0.AuxInt = i 11960 v0.Aux = s 11961 v0.AddArg(p) 11962 v0.AddArg(idx) 11963 v0.AddArg(mem) 11964 return true 11965 } 11966 // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i-1] {s} p mem))) 11967 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 11968 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i-1] {s} p mem)) 11969 for { 11970 x0 := v.Args[0] 11971 if x0.Op != OpAMD64MOVBload { 11972 break 11973 } 11974 i := x0.AuxInt 11975 s := x0.Aux 11976 p := x0.Args[0] 11977 mem := x0.Args[1] 11978 s0 := v.Args[1] 11979 if s0.Op != OpAMD64SHLLconst { 11980 break 11981 } 11982 if s0.AuxInt != 8 { 11983 break 11984 } 11985 x1 := s0.Args[0] 11986 if x1.Op != OpAMD64MOVBload { 11987 break 11988 } 11989 if x1.AuxInt != i-1 { 11990 break 11991 } 11992 if x1.Aux != s { 11993 break 11994 } 11995 if p != x1.Args[0] { 11996 break 11997 } 11998 if mem != x1.Args[1] { 11999 break 12000 } 12001 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 12002 break 12003 } 12004 b = mergePoint(b, x0, x1) 12005 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 12006 v.reset(OpCopy) 12007 v.AddArg(v0) 12008 v0.AuxInt = 8 12009 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 12010 v1.AuxInt = i - 1 12011 v1.Aux = s 12012 v1.AddArg(p) 12013 v1.AddArg(mem) 12014 v0.AddArg(v1) 12015 return true 12016 } 12017 // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) 12018 // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) 12019 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <v.Type> [i-1] {s} p idx mem)) 12020 for { 12021 x0 := v.Args[0] 12022 if x0.Op != OpAMD64MOVBloadidx1 { 12023 break 12024 } 12025 i := x0.AuxInt 12026 s := x0.Aux 12027 p := x0.Args[0] 12028 idx := x0.Args[1] 12029 mem := x0.Args[2] 12030 s0 := v.Args[1] 12031 if s0.Op != OpAMD64SHLLconst { 12032 break 12033 } 12034 if s0.AuxInt != 8 { 12035 break 12036 } 12037 x1 := s0.Args[0] 12038 if x1.Op != OpAMD64MOVBloadidx1 { 12039 break 12040 } 12041 if x1.AuxInt != i-1 { 12042 break 12043 } 12044 if x1.Aux != s { 12045 break 12046 } 12047 if p != x1.Args[0] { 12048 break 12049 } 12050 if idx != x1.Args[1] { 12051 break 12052 } 12053 if mem != x1.Args[2] { 12054 break 12055 } 12056 if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { 12057 break 12058 } 12059 b = mergePoint(b, x0, x1) 12060 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 12061 v.reset(OpCopy) 12062 v.AddArg(v0) 12063 v0.AuxInt = 8 12064 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 12065 v1.AuxInt = i - 1 12066 v1.Aux = s 12067 v1.AddArg(p) 12068 v1.AddArg(idx) 12069 v1.AddArg(mem) 12070 v0.AddArg(v1) 12071 return true 12072 } 12073 // match: (ORL o1:(ORL o0:(ROLWconst [8] x01:(MOVWload [i1] {s} p mem)) s1:(SHLLconst [16] x2:(MOVBload [i1-1] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i1-2] {s} p mem))) 12074 // cond: x01.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x01,x2,x3) != nil && clobber(x01) && clobber(x2) && clobber(x3) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) 12075 // result: @mergePoint(b,x01,x2,x3) (BSWAPL <v.Type> (MOVLload [i1-2] {s} p mem)) 12076 for { 12077 o1 := v.Args[0] 12078 if o1.Op != OpAMD64ORL { 12079 break 12080 } 12081 o0 := o1.Args[0] 12082 if o0.Op != OpAMD64ROLWconst { 12083 break 12084 } 12085 if o0.AuxInt != 8 { 12086 break 12087 } 12088 x01 := o0.Args[0] 12089 if x01.Op != OpAMD64MOVWload { 12090 break 12091 } 12092 i1 := x01.AuxInt 12093 s := x01.Aux 12094 p := x01.Args[0] 12095 mem := x01.Args[1] 12096 s1 := o1.Args[1] 12097 if s1.Op != OpAMD64SHLLconst { 12098 break 12099 } 12100 if s1.AuxInt != 16 { 12101 break 12102 } 12103 x2 := s1.Args[0] 12104 if x2.Op != OpAMD64MOVBload { 12105 break 12106 } 12107 if x2.AuxInt != i1-1 { 12108 break 12109 } 12110 if x2.Aux != s { 12111 break 12112 } 12113 if p != x2.Args[0] { 12114 break 12115 } 12116 if mem != x2.Args[1] { 12117 break 12118 } 12119 s2 := v.Args[1] 12120 if s2.Op != OpAMD64SHLLconst { 12121 break 12122 } 12123 if s2.AuxInt != 24 { 12124 break 12125 } 12126 x3 := s2.Args[0] 12127 if x3.Op != OpAMD64MOVBload { 12128 break 12129 } 12130 if x3.AuxInt != i1-2 { 12131 break 12132 } 12133 if x3.Aux != s { 12134 break 12135 } 12136 if p != x3.Args[0] { 12137 break 12138 } 12139 if mem != x3.Args[1] { 12140 break 12141 } 12142 if !(x01.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x01, x2, x3) != nil && clobber(x01) && clobber(x2) && clobber(x3) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { 12143 break 12144 } 12145 b = mergePoint(b, x01, x2, x3) 12146 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 12147 v.reset(OpCopy) 12148 v.AddArg(v0) 12149 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 12150 v1.AuxInt = i1 - 2 12151 v1.Aux = s 12152 v1.AddArg(p) 12153 v1.AddArg(mem) 12154 v0.AddArg(v1) 12155 return true 12156 } 12157 // match: (ORL o1:(ORL o0:(ROLWconst [8] x01:(MOVWloadidx1 [i1] {s} p idx mem)) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i1-1] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i1-2] {s} p idx mem))) 12158 // cond: x01.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x01,x2,x3) != nil && clobber(x01) && clobber(x2) && clobber(x3) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) 12159 // result: @mergePoint(b,x01,x2,x3) (BSWAPL <v.Type> (MOVLloadidx1 <v.Type> [i1-2] {s} p idx mem)) 12160 for { 12161 o1 := v.Args[0] 12162 if o1.Op != OpAMD64ORL { 12163 break 12164 } 12165 o0 := o1.Args[0] 12166 if o0.Op != OpAMD64ROLWconst { 12167 break 12168 } 12169 if o0.AuxInt != 8 { 12170 break 12171 } 12172 x01 := o0.Args[0] 12173 if x01.Op != OpAMD64MOVWloadidx1 { 12174 break 12175 } 12176 i1 := x01.AuxInt 12177 s := x01.Aux 12178 p := x01.Args[0] 12179 idx := x01.Args[1] 12180 mem := x01.Args[2] 12181 s1 := o1.Args[1] 12182 if s1.Op != OpAMD64SHLLconst { 12183 break 12184 } 12185 if s1.AuxInt != 16 { 12186 break 12187 } 12188 x2 := s1.Args[0] 12189 if x2.Op != OpAMD64MOVBloadidx1 { 12190 break 12191 } 12192 if x2.AuxInt != i1-1 { 12193 break 12194 } 12195 if x2.Aux != s { 12196 break 12197 } 12198 if p != x2.Args[0] { 12199 break 12200 } 12201 if idx != x2.Args[1] { 12202 break 12203 } 12204 if mem != x2.Args[2] { 12205 break 12206 } 12207 s2 := v.Args[1] 12208 if s2.Op != OpAMD64SHLLconst { 12209 break 12210 } 12211 if s2.AuxInt != 24 { 12212 break 12213 } 12214 x3 := s2.Args[0] 12215 if x3.Op != OpAMD64MOVBloadidx1 { 12216 break 12217 } 12218 if x3.AuxInt != i1-2 { 12219 break 12220 } 12221 if x3.Aux != s { 12222 break 12223 } 12224 if p != x3.Args[0] { 12225 break 12226 } 12227 if idx != x3.Args[1] { 12228 break 12229 } 12230 if mem != x3.Args[2] { 12231 break 12232 } 12233 if !(x01.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x01, x2, x3) != nil && clobber(x01) && clobber(x2) && clobber(x3) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { 12234 break 12235 } 12236 b = mergePoint(b, x01, x2, x3) 12237 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 12238 v.reset(OpCopy) 12239 v.AddArg(v0) 12240 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 12241 v1.AuxInt = i1 - 2 12242 v1.Aux = s 12243 v1.AddArg(p) 12244 v1.AddArg(idx) 12245 v1.AddArg(mem) 12246 v0.AddArg(v1) 12247 return true 12248 } 12249 return false 12250 } 12251 func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool { 12252 b := v.Block 12253 _ = b 12254 // match: (ORLconst [c] x) 12255 // cond: int32(c)==0 12256 // result: x 12257 for { 12258 c := v.AuxInt 12259 x := v.Args[0] 12260 if !(int32(c) == 0) { 12261 break 12262 } 12263 v.reset(OpCopy) 12264 v.Type = x.Type 12265 v.AddArg(x) 12266 return true 12267 } 12268 // match: (ORLconst [c] _) 12269 // cond: int32(c)==-1 12270 // result: (MOVLconst [-1]) 12271 for { 12272 c := v.AuxInt 12273 if !(int32(c) == -1) { 12274 break 12275 } 12276 v.reset(OpAMD64MOVLconst) 12277 v.AuxInt = -1 12278 return true 12279 } 12280 // match: (ORLconst [c] (MOVLconst [d])) 12281 // cond: 12282 // result: (MOVLconst [c|d]) 12283 for { 12284 c := v.AuxInt 12285 v_0 := v.Args[0] 12286 if v_0.Op != OpAMD64MOVLconst { 12287 break 12288 } 12289 d := v_0.AuxInt 12290 v.reset(OpAMD64MOVLconst) 12291 v.AuxInt = c | d 12292 return true 12293 } 12294 return false 12295 } 12296 func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { 12297 b := v.Block 12298 _ = b 12299 // match: (ORQ x (MOVQconst [c])) 12300 // cond: is32Bit(c) 12301 // result: (ORQconst [c] x) 12302 for { 12303 x := v.Args[0] 12304 v_1 := v.Args[1] 12305 if v_1.Op != OpAMD64MOVQconst { 12306 break 12307 } 12308 c := v_1.AuxInt 12309 if !(is32Bit(c)) { 12310 break 12311 } 12312 v.reset(OpAMD64ORQconst) 12313 v.AuxInt = c 12314 v.AddArg(x) 12315 return true 12316 } 12317 // match: (ORQ (MOVQconst [c]) x) 12318 // cond: is32Bit(c) 12319 // result: (ORQconst [c] x) 12320 for { 12321 v_0 := v.Args[0] 12322 if v_0.Op != OpAMD64MOVQconst { 12323 break 12324 } 12325 c := v_0.AuxInt 12326 x := v.Args[1] 12327 if !(is32Bit(c)) { 12328 break 12329 } 12330 v.reset(OpAMD64ORQconst) 12331 v.AuxInt = c 12332 v.AddArg(x) 12333 return true 12334 } 12335 // match: ( ORQ (SHLQconst x [c]) (SHRQconst x [64-c])) 12336 // cond: 12337 // result: (ROLQconst x [ c]) 12338 for { 12339 v_0 := v.Args[0] 12340 if v_0.Op != OpAMD64SHLQconst { 12341 break 12342 } 12343 c := v_0.AuxInt 12344 x := v_0.Args[0] 12345 v_1 := v.Args[1] 12346 if v_1.Op != OpAMD64SHRQconst { 12347 break 12348 } 12349 if v_1.AuxInt != 64-c { 12350 break 12351 } 12352 if x != v_1.Args[0] { 12353 break 12354 } 12355 v.reset(OpAMD64ROLQconst) 12356 v.AuxInt = c 12357 v.AddArg(x) 12358 return true 12359 } 12360 // match: ( ORQ (SHRQconst x [c]) (SHLQconst x [64-c])) 12361 // cond: 12362 // result: (ROLQconst x [64-c]) 12363 for { 12364 v_0 := v.Args[0] 12365 if v_0.Op != OpAMD64SHRQconst { 12366 break 12367 } 12368 c := v_0.AuxInt 12369 x := v_0.Args[0] 12370 v_1 := v.Args[1] 12371 if v_1.Op != OpAMD64SHLQconst { 12372 break 12373 } 12374 if v_1.AuxInt != 64-c { 12375 break 12376 } 12377 if x != v_1.Args[0] { 12378 break 12379 } 12380 v.reset(OpAMD64ROLQconst) 12381 v.AuxInt = 64 - c 12382 v.AddArg(x) 12383 return true 12384 } 12385 // match: (ORQ x x) 12386 // cond: 12387 // result: x 12388 for { 12389 x := v.Args[0] 12390 if x != v.Args[1] { 12391 break 12392 } 12393 v.reset(OpCopy) 12394 v.Type = x.Type 12395 v.AddArg(x) 12396 return true 12397 } 12398 // match: (ORQ x:(SHLQconst _) y) 12399 // cond: y.Op != OpAMD64SHLQconst 12400 // result: (ORQ y x) 12401 for { 12402 x := v.Args[0] 12403 if x.Op != OpAMD64SHLQconst { 12404 break 12405 } 12406 y := v.Args[1] 12407 if !(y.Op != OpAMD64SHLQconst) { 12408 break 12409 } 12410 v.reset(OpAMD64ORQ) 12411 v.AddArg(y) 12412 v.AddArg(x) 12413 return true 12414 } 12415 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) 12416 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 12417 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) 12418 for { 12419 o0 := v.Args[0] 12420 if o0.Op != OpAMD64ORQ { 12421 break 12422 } 12423 o1 := o0.Args[0] 12424 if o1.Op != OpAMD64ORQ { 12425 break 12426 } 12427 o2 := o1.Args[0] 12428 if o2.Op != OpAMD64ORQ { 12429 break 12430 } 12431 o3 := o2.Args[0] 12432 if o3.Op != OpAMD64ORQ { 12433 break 12434 } 12435 o4 := o3.Args[0] 12436 if o4.Op != OpAMD64ORQ { 12437 break 12438 } 12439 o5 := o4.Args[0] 12440 if o5.Op != OpAMD64ORQ { 12441 break 12442 } 12443 x0 := o5.Args[0] 12444 if x0.Op != OpAMD64MOVBload { 12445 break 12446 } 12447 i := x0.AuxInt 12448 s := x0.Aux 12449 p := x0.Args[0] 12450 mem := x0.Args[1] 12451 s0 := o5.Args[1] 12452 if s0.Op != OpAMD64SHLQconst { 12453 break 12454 } 12455 if s0.AuxInt != 8 { 12456 break 12457 } 12458 x1 := s0.Args[0] 12459 if x1.Op != OpAMD64MOVBload { 12460 break 12461 } 12462 if x1.AuxInt != i+1 { 12463 break 12464 } 12465 if x1.Aux != s { 12466 break 12467 } 12468 if p != x1.Args[0] { 12469 break 12470 } 12471 if mem != x1.Args[1] { 12472 break 12473 } 12474 s1 := o4.Args[1] 12475 if s1.Op != OpAMD64SHLQconst { 12476 break 12477 } 12478 if s1.AuxInt != 16 { 12479 break 12480 } 12481 x2 := s1.Args[0] 12482 if x2.Op != OpAMD64MOVBload { 12483 break 12484 } 12485 if x2.AuxInt != i+2 { 12486 break 12487 } 12488 if x2.Aux != s { 12489 break 12490 } 12491 if p != x2.Args[0] { 12492 break 12493 } 12494 if mem != x2.Args[1] { 12495 break 12496 } 12497 s2 := o3.Args[1] 12498 if s2.Op != OpAMD64SHLQconst { 12499 break 12500 } 12501 if s2.AuxInt != 24 { 12502 break 12503 } 12504 x3 := s2.Args[0] 12505 if x3.Op != OpAMD64MOVBload { 12506 break 12507 } 12508 if x3.AuxInt != i+3 { 12509 break 12510 } 12511 if x3.Aux != s { 12512 break 12513 } 12514 if p != x3.Args[0] { 12515 break 12516 } 12517 if mem != x3.Args[1] { 12518 break 12519 } 12520 s3 := o2.Args[1] 12521 if s3.Op != OpAMD64SHLQconst { 12522 break 12523 } 12524 if s3.AuxInt != 32 { 12525 break 12526 } 12527 x4 := s3.Args[0] 12528 if x4.Op != OpAMD64MOVBload { 12529 break 12530 } 12531 if x4.AuxInt != i+4 { 12532 break 12533 } 12534 if x4.Aux != s { 12535 break 12536 } 12537 if p != x4.Args[0] { 12538 break 12539 } 12540 if mem != x4.Args[1] { 12541 break 12542 } 12543 s4 := o1.Args[1] 12544 if s4.Op != OpAMD64SHLQconst { 12545 break 12546 } 12547 if s4.AuxInt != 40 { 12548 break 12549 } 12550 x5 := s4.Args[0] 12551 if x5.Op != OpAMD64MOVBload { 12552 break 12553 } 12554 if x5.AuxInt != i+5 { 12555 break 12556 } 12557 if x5.Aux != s { 12558 break 12559 } 12560 if p != x5.Args[0] { 12561 break 12562 } 12563 if mem != x5.Args[1] { 12564 break 12565 } 12566 s5 := o0.Args[1] 12567 if s5.Op != OpAMD64SHLQconst { 12568 break 12569 } 12570 if s5.AuxInt != 48 { 12571 break 12572 } 12573 x6 := s5.Args[0] 12574 if x6.Op != OpAMD64MOVBload { 12575 break 12576 } 12577 if x6.AuxInt != i+6 { 12578 break 12579 } 12580 if x6.Aux != s { 12581 break 12582 } 12583 if p != x6.Args[0] { 12584 break 12585 } 12586 if mem != x6.Args[1] { 12587 break 12588 } 12589 s6 := v.Args[1] 12590 if s6.Op != OpAMD64SHLQconst { 12591 break 12592 } 12593 if s6.AuxInt != 56 { 12594 break 12595 } 12596 x7 := s6.Args[0] 12597 if x7.Op != OpAMD64MOVBload { 12598 break 12599 } 12600 if x7.AuxInt != i+7 { 12601 break 12602 } 12603 if x7.Aux != s { 12604 break 12605 } 12606 if p != x7.Args[0] { 12607 break 12608 } 12609 if mem != x7.Args[1] { 12610 break 12611 } 12612 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 12613 break 12614 } 12615 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 12616 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 12617 v.reset(OpCopy) 12618 v.AddArg(v0) 12619 v0.AuxInt = i 12620 v0.Aux = s 12621 v0.AddArg(p) 12622 v0.AddArg(mem) 12623 return true 12624 } 12625 // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) 12626 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 12627 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem) 12628 for { 12629 o0 := v.Args[0] 12630 if o0.Op != OpAMD64ORQ { 12631 break 12632 } 12633 o1 := o0.Args[0] 12634 if o1.Op != OpAMD64ORQ { 12635 break 12636 } 12637 o2 := o1.Args[0] 12638 if o2.Op != OpAMD64ORQ { 12639 break 12640 } 12641 o3 := o2.Args[0] 12642 if o3.Op != OpAMD64ORQ { 12643 break 12644 } 12645 o4 := o3.Args[0] 12646 if o4.Op != OpAMD64ORQ { 12647 break 12648 } 12649 o5 := o4.Args[0] 12650 if o5.Op != OpAMD64ORQ { 12651 break 12652 } 12653 x0 := o5.Args[0] 12654 if x0.Op != OpAMD64MOVBloadidx1 { 12655 break 12656 } 12657 i := x0.AuxInt 12658 s := x0.Aux 12659 p := x0.Args[0] 12660 idx := x0.Args[1] 12661 mem := x0.Args[2] 12662 s0 := o5.Args[1] 12663 if s0.Op != OpAMD64SHLQconst { 12664 break 12665 } 12666 if s0.AuxInt != 8 { 12667 break 12668 } 12669 x1 := s0.Args[0] 12670 if x1.Op != OpAMD64MOVBloadidx1 { 12671 break 12672 } 12673 if x1.AuxInt != i+1 { 12674 break 12675 } 12676 if x1.Aux != s { 12677 break 12678 } 12679 if p != x1.Args[0] { 12680 break 12681 } 12682 if idx != x1.Args[1] { 12683 break 12684 } 12685 if mem != x1.Args[2] { 12686 break 12687 } 12688 s1 := o4.Args[1] 12689 if s1.Op != OpAMD64SHLQconst { 12690 break 12691 } 12692 if s1.AuxInt != 16 { 12693 break 12694 } 12695 x2 := s1.Args[0] 12696 if x2.Op != OpAMD64MOVBloadidx1 { 12697 break 12698 } 12699 if x2.AuxInt != i+2 { 12700 break 12701 } 12702 if x2.Aux != s { 12703 break 12704 } 12705 if p != x2.Args[0] { 12706 break 12707 } 12708 if idx != x2.Args[1] { 12709 break 12710 } 12711 if mem != x2.Args[2] { 12712 break 12713 } 12714 s2 := o3.Args[1] 12715 if s2.Op != OpAMD64SHLQconst { 12716 break 12717 } 12718 if s2.AuxInt != 24 { 12719 break 12720 } 12721 x3 := s2.Args[0] 12722 if x3.Op != OpAMD64MOVBloadidx1 { 12723 break 12724 } 12725 if x3.AuxInt != i+3 { 12726 break 12727 } 12728 if x3.Aux != s { 12729 break 12730 } 12731 if p != x3.Args[0] { 12732 break 12733 } 12734 if idx != x3.Args[1] { 12735 break 12736 } 12737 if mem != x3.Args[2] { 12738 break 12739 } 12740 s3 := o2.Args[1] 12741 if s3.Op != OpAMD64SHLQconst { 12742 break 12743 } 12744 if s3.AuxInt != 32 { 12745 break 12746 } 12747 x4 := s3.Args[0] 12748 if x4.Op != OpAMD64MOVBloadidx1 { 12749 break 12750 } 12751 if x4.AuxInt != i+4 { 12752 break 12753 } 12754 if x4.Aux != s { 12755 break 12756 } 12757 if p != x4.Args[0] { 12758 break 12759 } 12760 if idx != x4.Args[1] { 12761 break 12762 } 12763 if mem != x4.Args[2] { 12764 break 12765 } 12766 s4 := o1.Args[1] 12767 if s4.Op != OpAMD64SHLQconst { 12768 break 12769 } 12770 if s4.AuxInt != 40 { 12771 break 12772 } 12773 x5 := s4.Args[0] 12774 if x5.Op != OpAMD64MOVBloadidx1 { 12775 break 12776 } 12777 if x5.AuxInt != i+5 { 12778 break 12779 } 12780 if x5.Aux != s { 12781 break 12782 } 12783 if p != x5.Args[0] { 12784 break 12785 } 12786 if idx != x5.Args[1] { 12787 break 12788 } 12789 if mem != x5.Args[2] { 12790 break 12791 } 12792 s5 := o0.Args[1] 12793 if s5.Op != OpAMD64SHLQconst { 12794 break 12795 } 12796 if s5.AuxInt != 48 { 12797 break 12798 } 12799 x6 := s5.Args[0] 12800 if x6.Op != OpAMD64MOVBloadidx1 { 12801 break 12802 } 12803 if x6.AuxInt != i+6 { 12804 break 12805 } 12806 if x6.Aux != s { 12807 break 12808 } 12809 if p != x6.Args[0] { 12810 break 12811 } 12812 if idx != x6.Args[1] { 12813 break 12814 } 12815 if mem != x6.Args[2] { 12816 break 12817 } 12818 s6 := v.Args[1] 12819 if s6.Op != OpAMD64SHLQconst { 12820 break 12821 } 12822 if s6.AuxInt != 56 { 12823 break 12824 } 12825 x7 := s6.Args[0] 12826 if x7.Op != OpAMD64MOVBloadidx1 { 12827 break 12828 } 12829 if x7.AuxInt != i+7 { 12830 break 12831 } 12832 if x7.Aux != s { 12833 break 12834 } 12835 if p != x7.Args[0] { 12836 break 12837 } 12838 if idx != x7.Args[1] { 12839 break 12840 } 12841 if mem != x7.Args[2] { 12842 break 12843 } 12844 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 12845 break 12846 } 12847 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 12848 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, v.Type) 12849 v.reset(OpCopy) 12850 v.AddArg(v0) 12851 v0.AuxInt = i 12852 v0.Aux = s 12853 v0.AddArg(p) 12854 v0.AddArg(idx) 12855 v0.AddArg(mem) 12856 return true 12857 } 12858 // match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i-1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i-2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i-3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i-4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i-5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i-6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i-7] {s} p mem))) 12859 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 12860 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQload [i-7] {s} p mem)) 12861 for { 12862 o5 := v.Args[0] 12863 if o5.Op != OpAMD64ORQ { 12864 break 12865 } 12866 o4 := o5.Args[0] 12867 if o4.Op != OpAMD64ORQ { 12868 break 12869 } 12870 o3 := o4.Args[0] 12871 if o3.Op != OpAMD64ORQ { 12872 break 12873 } 12874 o2 := o3.Args[0] 12875 if o2.Op != OpAMD64ORQ { 12876 break 12877 } 12878 o1 := o2.Args[0] 12879 if o1.Op != OpAMD64ORQ { 12880 break 12881 } 12882 o0 := o1.Args[0] 12883 if o0.Op != OpAMD64ORQ { 12884 break 12885 } 12886 x0 := o0.Args[0] 12887 if x0.Op != OpAMD64MOVBload { 12888 break 12889 } 12890 i := x0.AuxInt 12891 s := x0.Aux 12892 p := x0.Args[0] 12893 mem := x0.Args[1] 12894 s0 := o0.Args[1] 12895 if s0.Op != OpAMD64SHLQconst { 12896 break 12897 } 12898 if s0.AuxInt != 8 { 12899 break 12900 } 12901 x1 := s0.Args[0] 12902 if x1.Op != OpAMD64MOVBload { 12903 break 12904 } 12905 if x1.AuxInt != i-1 { 12906 break 12907 } 12908 if x1.Aux != s { 12909 break 12910 } 12911 if p != x1.Args[0] { 12912 break 12913 } 12914 if mem != x1.Args[1] { 12915 break 12916 } 12917 s1 := o1.Args[1] 12918 if s1.Op != OpAMD64SHLQconst { 12919 break 12920 } 12921 if s1.AuxInt != 16 { 12922 break 12923 } 12924 x2 := s1.Args[0] 12925 if x2.Op != OpAMD64MOVBload { 12926 break 12927 } 12928 if x2.AuxInt != i-2 { 12929 break 12930 } 12931 if x2.Aux != s { 12932 break 12933 } 12934 if p != x2.Args[0] { 12935 break 12936 } 12937 if mem != x2.Args[1] { 12938 break 12939 } 12940 s2 := o2.Args[1] 12941 if s2.Op != OpAMD64SHLQconst { 12942 break 12943 } 12944 if s2.AuxInt != 24 { 12945 break 12946 } 12947 x3 := s2.Args[0] 12948 if x3.Op != OpAMD64MOVBload { 12949 break 12950 } 12951 if x3.AuxInt != i-3 { 12952 break 12953 } 12954 if x3.Aux != s { 12955 break 12956 } 12957 if p != x3.Args[0] { 12958 break 12959 } 12960 if mem != x3.Args[1] { 12961 break 12962 } 12963 s3 := o3.Args[1] 12964 if s3.Op != OpAMD64SHLQconst { 12965 break 12966 } 12967 if s3.AuxInt != 32 { 12968 break 12969 } 12970 x4 := s3.Args[0] 12971 if x4.Op != OpAMD64MOVBload { 12972 break 12973 } 12974 if x4.AuxInt != i-4 { 12975 break 12976 } 12977 if x4.Aux != s { 12978 break 12979 } 12980 if p != x4.Args[0] { 12981 break 12982 } 12983 if mem != x4.Args[1] { 12984 break 12985 } 12986 s4 := o4.Args[1] 12987 if s4.Op != OpAMD64SHLQconst { 12988 break 12989 } 12990 if s4.AuxInt != 40 { 12991 break 12992 } 12993 x5 := s4.Args[0] 12994 if x5.Op != OpAMD64MOVBload { 12995 break 12996 } 12997 if x5.AuxInt != i-5 { 12998 break 12999 } 13000 if x5.Aux != s { 13001 break 13002 } 13003 if p != x5.Args[0] { 13004 break 13005 } 13006 if mem != x5.Args[1] { 13007 break 13008 } 13009 s5 := o5.Args[1] 13010 if s5.Op != OpAMD64SHLQconst { 13011 break 13012 } 13013 if s5.AuxInt != 48 { 13014 break 13015 } 13016 x6 := s5.Args[0] 13017 if x6.Op != OpAMD64MOVBload { 13018 break 13019 } 13020 if x6.AuxInt != i-6 { 13021 break 13022 } 13023 if x6.Aux != s { 13024 break 13025 } 13026 if p != x6.Args[0] { 13027 break 13028 } 13029 if mem != x6.Args[1] { 13030 break 13031 } 13032 s6 := v.Args[1] 13033 if s6.Op != OpAMD64SHLQconst { 13034 break 13035 } 13036 if s6.AuxInt != 56 { 13037 break 13038 } 13039 x7 := s6.Args[0] 13040 if x7.Op != OpAMD64MOVBload { 13041 break 13042 } 13043 if x7.AuxInt != i-7 { 13044 break 13045 } 13046 if x7.Aux != s { 13047 break 13048 } 13049 if p != x7.Args[0] { 13050 break 13051 } 13052 if mem != x7.Args[1] { 13053 break 13054 } 13055 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 13056 break 13057 } 13058 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 13059 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 13060 v.reset(OpCopy) 13061 v.AddArg(v0) 13062 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 13063 v1.AuxInt = i - 7 13064 v1.Aux = s 13065 v1.AddArg(p) 13066 v1.AddArg(mem) 13067 v0.AddArg(v1) 13068 return true 13069 } 13070 // match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i-4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i-5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i-6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i-7] {s} p idx mem))) 13071 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) 13072 // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQloadidx1 <v.Type> [i-7] {s} p idx mem)) 13073 for { 13074 o5 := v.Args[0] 13075 if o5.Op != OpAMD64ORQ { 13076 break 13077 } 13078 o4 := o5.Args[0] 13079 if o4.Op != OpAMD64ORQ { 13080 break 13081 } 13082 o3 := o4.Args[0] 13083 if o3.Op != OpAMD64ORQ { 13084 break 13085 } 13086 o2 := o3.Args[0] 13087 if o2.Op != OpAMD64ORQ { 13088 break 13089 } 13090 o1 := o2.Args[0] 13091 if o1.Op != OpAMD64ORQ { 13092 break 13093 } 13094 o0 := o1.Args[0] 13095 if o0.Op != OpAMD64ORQ { 13096 break 13097 } 13098 x0 := o0.Args[0] 13099 if x0.Op != OpAMD64MOVBloadidx1 { 13100 break 13101 } 13102 i := x0.AuxInt 13103 s := x0.Aux 13104 p := x0.Args[0] 13105 idx := x0.Args[1] 13106 mem := x0.Args[2] 13107 s0 := o0.Args[1] 13108 if s0.Op != OpAMD64SHLQconst { 13109 break 13110 } 13111 if s0.AuxInt != 8 { 13112 break 13113 } 13114 x1 := s0.Args[0] 13115 if x1.Op != OpAMD64MOVBloadidx1 { 13116 break 13117 } 13118 if x1.AuxInt != i-1 { 13119 break 13120 } 13121 if x1.Aux != s { 13122 break 13123 } 13124 if p != x1.Args[0] { 13125 break 13126 } 13127 if idx != x1.Args[1] { 13128 break 13129 } 13130 if mem != x1.Args[2] { 13131 break 13132 } 13133 s1 := o1.Args[1] 13134 if s1.Op != OpAMD64SHLQconst { 13135 break 13136 } 13137 if s1.AuxInt != 16 { 13138 break 13139 } 13140 x2 := s1.Args[0] 13141 if x2.Op != OpAMD64MOVBloadidx1 { 13142 break 13143 } 13144 if x2.AuxInt != i-2 { 13145 break 13146 } 13147 if x2.Aux != s { 13148 break 13149 } 13150 if p != x2.Args[0] { 13151 break 13152 } 13153 if idx != x2.Args[1] { 13154 break 13155 } 13156 if mem != x2.Args[2] { 13157 break 13158 } 13159 s2 := o2.Args[1] 13160 if s2.Op != OpAMD64SHLQconst { 13161 break 13162 } 13163 if s2.AuxInt != 24 { 13164 break 13165 } 13166 x3 := s2.Args[0] 13167 if x3.Op != OpAMD64MOVBloadidx1 { 13168 break 13169 } 13170 if x3.AuxInt != i-3 { 13171 break 13172 } 13173 if x3.Aux != s { 13174 break 13175 } 13176 if p != x3.Args[0] { 13177 break 13178 } 13179 if idx != x3.Args[1] { 13180 break 13181 } 13182 if mem != x3.Args[2] { 13183 break 13184 } 13185 s3 := o3.Args[1] 13186 if s3.Op != OpAMD64SHLQconst { 13187 break 13188 } 13189 if s3.AuxInt != 32 { 13190 break 13191 } 13192 x4 := s3.Args[0] 13193 if x4.Op != OpAMD64MOVBloadidx1 { 13194 break 13195 } 13196 if x4.AuxInt != i-4 { 13197 break 13198 } 13199 if x4.Aux != s { 13200 break 13201 } 13202 if p != x4.Args[0] { 13203 break 13204 } 13205 if idx != x4.Args[1] { 13206 break 13207 } 13208 if mem != x4.Args[2] { 13209 break 13210 } 13211 s4 := o4.Args[1] 13212 if s4.Op != OpAMD64SHLQconst { 13213 break 13214 } 13215 if s4.AuxInt != 40 { 13216 break 13217 } 13218 x5 := s4.Args[0] 13219 if x5.Op != OpAMD64MOVBloadidx1 { 13220 break 13221 } 13222 if x5.AuxInt != i-5 { 13223 break 13224 } 13225 if x5.Aux != s { 13226 break 13227 } 13228 if p != x5.Args[0] { 13229 break 13230 } 13231 if idx != x5.Args[1] { 13232 break 13233 } 13234 if mem != x5.Args[2] { 13235 break 13236 } 13237 s5 := o5.Args[1] 13238 if s5.Op != OpAMD64SHLQconst { 13239 break 13240 } 13241 if s5.AuxInt != 48 { 13242 break 13243 } 13244 x6 := s5.Args[0] 13245 if x6.Op != OpAMD64MOVBloadidx1 { 13246 break 13247 } 13248 if x6.AuxInt != i-6 { 13249 break 13250 } 13251 if x6.Aux != s { 13252 break 13253 } 13254 if p != x6.Args[0] { 13255 break 13256 } 13257 if idx != x6.Args[1] { 13258 break 13259 } 13260 if mem != x6.Args[2] { 13261 break 13262 } 13263 s6 := v.Args[1] 13264 if s6.Op != OpAMD64SHLQconst { 13265 break 13266 } 13267 if s6.AuxInt != 56 { 13268 break 13269 } 13270 x7 := s6.Args[0] 13271 if x7.Op != OpAMD64MOVBloadidx1 { 13272 break 13273 } 13274 if x7.AuxInt != i-7 { 13275 break 13276 } 13277 if x7.Aux != s { 13278 break 13279 } 13280 if p != x7.Args[0] { 13281 break 13282 } 13283 if idx != x7.Args[1] { 13284 break 13285 } 13286 if mem != x7.Args[2] { 13287 break 13288 } 13289 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { 13290 break 13291 } 13292 b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 13293 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 13294 v.reset(OpCopy) 13295 v.AddArg(v0) 13296 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, v.Type) 13297 v1.AuxInt = i - 7 13298 v1.Aux = s 13299 v1.AddArg(p) 13300 v1.AddArg(idx) 13301 v1.AddArg(mem) 13302 v0.AddArg(v1) 13303 return true 13304 } 13305 return false 13306 } 13307 func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { 13308 b := v.Block 13309 _ = b 13310 // match: (ORQconst [0] x) 13311 // cond: 13312 // result: x 13313 for { 13314 if v.AuxInt != 0 { 13315 break 13316 } 13317 x := v.Args[0] 13318 v.reset(OpCopy) 13319 v.Type = x.Type 13320 v.AddArg(x) 13321 return true 13322 } 13323 // match: (ORQconst [-1] _) 13324 // cond: 13325 // result: (MOVQconst [-1]) 13326 for { 13327 if v.AuxInt != -1 { 13328 break 13329 } 13330 v.reset(OpAMD64MOVQconst) 13331 v.AuxInt = -1 13332 return true 13333 } 13334 // match: (ORQconst [c] (MOVQconst [d])) 13335 // cond: 13336 // result: (MOVQconst [c|d]) 13337 for { 13338 c := v.AuxInt 13339 v_0 := v.Args[0] 13340 if v_0.Op != OpAMD64MOVQconst { 13341 break 13342 } 13343 d := v_0.AuxInt 13344 v.reset(OpAMD64MOVQconst) 13345 v.AuxInt = c | d 13346 return true 13347 } 13348 return false 13349 } 13350 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool { 13351 b := v.Block 13352 _ = b 13353 // match: (ROLBconst [c] (ROLBconst [d] x)) 13354 // cond: 13355 // result: (ROLBconst [(c+d)& 7] x) 13356 for { 13357 c := v.AuxInt 13358 v_0 := v.Args[0] 13359 if v_0.Op != OpAMD64ROLBconst { 13360 break 13361 } 13362 d := v_0.AuxInt 13363 x := v_0.Args[0] 13364 v.reset(OpAMD64ROLBconst) 13365 v.AuxInt = (c + d) & 7 13366 v.AddArg(x) 13367 return true 13368 } 13369 // match: (ROLBconst x [0]) 13370 // cond: 13371 // result: x 13372 for { 13373 if v.AuxInt != 0 { 13374 break 13375 } 13376 x := v.Args[0] 13377 v.reset(OpCopy) 13378 v.Type = x.Type 13379 v.AddArg(x) 13380 return true 13381 } 13382 return false 13383 } 13384 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool { 13385 b := v.Block 13386 _ = b 13387 // match: (ROLLconst [c] (ROLLconst [d] x)) 13388 // cond: 13389 // result: (ROLLconst [(c+d)&31] x) 13390 for { 13391 c := v.AuxInt 13392 v_0 := v.Args[0] 13393 if v_0.Op != OpAMD64ROLLconst { 13394 break 13395 } 13396 d := v_0.AuxInt 13397 x := v_0.Args[0] 13398 v.reset(OpAMD64ROLLconst) 13399 v.AuxInt = (c + d) & 31 13400 v.AddArg(x) 13401 return true 13402 } 13403 // match: (ROLLconst x [0]) 13404 // cond: 13405 // result: x 13406 for { 13407 if v.AuxInt != 0 { 13408 break 13409 } 13410 x := v.Args[0] 13411 v.reset(OpCopy) 13412 v.Type = x.Type 13413 v.AddArg(x) 13414 return true 13415 } 13416 return false 13417 } 13418 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool { 13419 b := v.Block 13420 _ = b 13421 // match: (ROLQconst [c] (ROLQconst [d] x)) 13422 // cond: 13423 // result: (ROLQconst [(c+d)&63] x) 13424 for { 13425 c := v.AuxInt 13426 v_0 := v.Args[0] 13427 if v_0.Op != OpAMD64ROLQconst { 13428 break 13429 } 13430 d := v_0.AuxInt 13431 x := v_0.Args[0] 13432 v.reset(OpAMD64ROLQconst) 13433 v.AuxInt = (c + d) & 63 13434 v.AddArg(x) 13435 return true 13436 } 13437 // match: (ROLQconst x [0]) 13438 // cond: 13439 // result: x 13440 for { 13441 if v.AuxInt != 0 { 13442 break 13443 } 13444 x := v.Args[0] 13445 v.reset(OpCopy) 13446 v.Type = x.Type 13447 v.AddArg(x) 13448 return true 13449 } 13450 return false 13451 } 13452 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool { 13453 b := v.Block 13454 _ = b 13455 // match: (ROLWconst [c] (ROLWconst [d] x)) 13456 // cond: 13457 // result: (ROLWconst [(c+d)&15] x) 13458 for { 13459 c := v.AuxInt 13460 v_0 := v.Args[0] 13461 if v_0.Op != OpAMD64ROLWconst { 13462 break 13463 } 13464 d := v_0.AuxInt 13465 x := v_0.Args[0] 13466 v.reset(OpAMD64ROLWconst) 13467 v.AuxInt = (c + d) & 15 13468 v.AddArg(x) 13469 return true 13470 } 13471 // match: (ROLWconst x [0]) 13472 // cond: 13473 // result: x 13474 for { 13475 if v.AuxInt != 0 { 13476 break 13477 } 13478 x := v.Args[0] 13479 v.reset(OpCopy) 13480 v.Type = x.Type 13481 v.AddArg(x) 13482 return true 13483 } 13484 return false 13485 } 13486 func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool { 13487 b := v.Block 13488 _ = b 13489 // match: (SARB x (MOVQconst [c])) 13490 // cond: 13491 // result: (SARBconst [min(c&31,7)] x) 13492 for { 13493 x := v.Args[0] 13494 v_1 := v.Args[1] 13495 if v_1.Op != OpAMD64MOVQconst { 13496 break 13497 } 13498 c := v_1.AuxInt 13499 v.reset(OpAMD64SARBconst) 13500 v.AuxInt = min(c&31, 7) 13501 v.AddArg(x) 13502 return true 13503 } 13504 // match: (SARB x (MOVLconst [c])) 13505 // cond: 13506 // result: (SARBconst [min(c&31,7)] x) 13507 for { 13508 x := v.Args[0] 13509 v_1 := v.Args[1] 13510 if v_1.Op != OpAMD64MOVLconst { 13511 break 13512 } 13513 c := v_1.AuxInt 13514 v.reset(OpAMD64SARBconst) 13515 v.AuxInt = min(c&31, 7) 13516 v.AddArg(x) 13517 return true 13518 } 13519 return false 13520 } 13521 func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { 13522 b := v.Block 13523 _ = b 13524 // match: (SARBconst x [0]) 13525 // cond: 13526 // result: x 13527 for { 13528 if v.AuxInt != 0 { 13529 break 13530 } 13531 x := v.Args[0] 13532 v.reset(OpCopy) 13533 v.Type = x.Type 13534 v.AddArg(x) 13535 return true 13536 } 13537 // match: (SARBconst [c] (MOVQconst [d])) 13538 // cond: 13539 // result: (MOVQconst [d>>uint64(c)]) 13540 for { 13541 c := v.AuxInt 13542 v_0 := v.Args[0] 13543 if v_0.Op != OpAMD64MOVQconst { 13544 break 13545 } 13546 d := v_0.AuxInt 13547 v.reset(OpAMD64MOVQconst) 13548 v.AuxInt = d >> uint64(c) 13549 return true 13550 } 13551 return false 13552 } 13553 func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool { 13554 b := v.Block 13555 _ = b 13556 // match: (SARL x (MOVQconst [c])) 13557 // cond: 13558 // result: (SARLconst [c&31] x) 13559 for { 13560 x := v.Args[0] 13561 v_1 := v.Args[1] 13562 if v_1.Op != OpAMD64MOVQconst { 13563 break 13564 } 13565 c := v_1.AuxInt 13566 v.reset(OpAMD64SARLconst) 13567 v.AuxInt = c & 31 13568 v.AddArg(x) 13569 return true 13570 } 13571 // match: (SARL x (MOVLconst [c])) 13572 // cond: 13573 // result: (SARLconst [c&31] x) 13574 for { 13575 x := v.Args[0] 13576 v_1 := v.Args[1] 13577 if v_1.Op != OpAMD64MOVLconst { 13578 break 13579 } 13580 c := v_1.AuxInt 13581 v.reset(OpAMD64SARLconst) 13582 v.AuxInt = c & 31 13583 v.AddArg(x) 13584 return true 13585 } 13586 // match: (SARL x (ANDLconst [31] y)) 13587 // cond: 13588 // result: (SARL x y) 13589 for { 13590 x := v.Args[0] 13591 v_1 := v.Args[1] 13592 if v_1.Op != OpAMD64ANDLconst { 13593 break 13594 } 13595 if v_1.AuxInt != 31 { 13596 break 13597 } 13598 y := v_1.Args[0] 13599 v.reset(OpAMD64SARL) 13600 v.AddArg(x) 13601 v.AddArg(y) 13602 return true 13603 } 13604 return false 13605 } 13606 func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool { 13607 b := v.Block 13608 _ = b 13609 // match: (SARLconst x [0]) 13610 // cond: 13611 // result: x 13612 for { 13613 if v.AuxInt != 0 { 13614 break 13615 } 13616 x := v.Args[0] 13617 v.reset(OpCopy) 13618 v.Type = x.Type 13619 v.AddArg(x) 13620 return true 13621 } 13622 // match: (SARLconst [c] (MOVQconst [d])) 13623 // cond: 13624 // result: (MOVQconst [d>>uint64(c)]) 13625 for { 13626 c := v.AuxInt 13627 v_0 := v.Args[0] 13628 if v_0.Op != OpAMD64MOVQconst { 13629 break 13630 } 13631 d := v_0.AuxInt 13632 v.reset(OpAMD64MOVQconst) 13633 v.AuxInt = d >> uint64(c) 13634 return true 13635 } 13636 return false 13637 } 13638 func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool { 13639 b := v.Block 13640 _ = b 13641 // match: (SARQ x (MOVQconst [c])) 13642 // cond: 13643 // result: (SARQconst [c&63] x) 13644 for { 13645 x := v.Args[0] 13646 v_1 := v.Args[1] 13647 if v_1.Op != OpAMD64MOVQconst { 13648 break 13649 } 13650 c := v_1.AuxInt 13651 v.reset(OpAMD64SARQconst) 13652 v.AuxInt = c & 63 13653 v.AddArg(x) 13654 return true 13655 } 13656 // match: (SARQ x (MOVLconst [c])) 13657 // cond: 13658 // result: (SARQconst [c&63] x) 13659 for { 13660 x := v.Args[0] 13661 v_1 := v.Args[1] 13662 if v_1.Op != OpAMD64MOVLconst { 13663 break 13664 } 13665 c := v_1.AuxInt 13666 v.reset(OpAMD64SARQconst) 13667 v.AuxInt = c & 63 13668 v.AddArg(x) 13669 return true 13670 } 13671 // match: (SARQ x (ANDQconst [63] y)) 13672 // cond: 13673 // result: (SARQ x y) 13674 for { 13675 x := v.Args[0] 13676 v_1 := v.Args[1] 13677 if v_1.Op != OpAMD64ANDQconst { 13678 break 13679 } 13680 if v_1.AuxInt != 63 { 13681 break 13682 } 13683 y := v_1.Args[0] 13684 v.reset(OpAMD64SARQ) 13685 v.AddArg(x) 13686 v.AddArg(y) 13687 return true 13688 } 13689 return false 13690 } 13691 func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool { 13692 b := v.Block 13693 _ = b 13694 // match: (SARQconst x [0]) 13695 // cond: 13696 // result: x 13697 for { 13698 if v.AuxInt != 0 { 13699 break 13700 } 13701 x := v.Args[0] 13702 v.reset(OpCopy) 13703 v.Type = x.Type 13704 v.AddArg(x) 13705 return true 13706 } 13707 // match: (SARQconst [c] (MOVQconst [d])) 13708 // cond: 13709 // result: (MOVQconst [d>>uint64(c)]) 13710 for { 13711 c := v.AuxInt 13712 v_0 := v.Args[0] 13713 if v_0.Op != OpAMD64MOVQconst { 13714 break 13715 } 13716 d := v_0.AuxInt 13717 v.reset(OpAMD64MOVQconst) 13718 v.AuxInt = d >> uint64(c) 13719 return true 13720 } 13721 return false 13722 } 13723 func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool { 13724 b := v.Block 13725 _ = b 13726 // match: (SARW x (MOVQconst [c])) 13727 // cond: 13728 // result: (SARWconst [min(c&31,15)] x) 13729 for { 13730 x := v.Args[0] 13731 v_1 := v.Args[1] 13732 if v_1.Op != OpAMD64MOVQconst { 13733 break 13734 } 13735 c := v_1.AuxInt 13736 v.reset(OpAMD64SARWconst) 13737 v.AuxInt = min(c&31, 15) 13738 v.AddArg(x) 13739 return true 13740 } 13741 // match: (SARW x (MOVLconst [c])) 13742 // cond: 13743 // result: (SARWconst [min(c&31,15)] x) 13744 for { 13745 x := v.Args[0] 13746 v_1 := v.Args[1] 13747 if v_1.Op != OpAMD64MOVLconst { 13748 break 13749 } 13750 c := v_1.AuxInt 13751 v.reset(OpAMD64SARWconst) 13752 v.AuxInt = min(c&31, 15) 13753 v.AddArg(x) 13754 return true 13755 } 13756 return false 13757 } 13758 func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool { 13759 b := v.Block 13760 _ = b 13761 // match: (SARWconst x [0]) 13762 // cond: 13763 // result: x 13764 for { 13765 if v.AuxInt != 0 { 13766 break 13767 } 13768 x := v.Args[0] 13769 v.reset(OpCopy) 13770 v.Type = x.Type 13771 v.AddArg(x) 13772 return true 13773 } 13774 // match: (SARWconst [c] (MOVQconst [d])) 13775 // cond: 13776 // result: (MOVQconst [d>>uint64(c)]) 13777 for { 13778 c := v.AuxInt 13779 v_0 := v.Args[0] 13780 if v_0.Op != OpAMD64MOVQconst { 13781 break 13782 } 13783 d := v_0.AuxInt 13784 v.reset(OpAMD64MOVQconst) 13785 v.AuxInt = d >> uint64(c) 13786 return true 13787 } 13788 return false 13789 } 13790 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool { 13791 b := v.Block 13792 _ = b 13793 // match: (SBBLcarrymask (FlagEQ)) 13794 // cond: 13795 // result: (MOVLconst [0]) 13796 for { 13797 v_0 := v.Args[0] 13798 if v_0.Op != OpAMD64FlagEQ { 13799 break 13800 } 13801 v.reset(OpAMD64MOVLconst) 13802 v.AuxInt = 0 13803 return true 13804 } 13805 // match: (SBBLcarrymask (FlagLT_ULT)) 13806 // cond: 13807 // result: (MOVLconst [-1]) 13808 for { 13809 v_0 := v.Args[0] 13810 if v_0.Op != OpAMD64FlagLT_ULT { 13811 break 13812 } 13813 v.reset(OpAMD64MOVLconst) 13814 v.AuxInt = -1 13815 return true 13816 } 13817 // match: (SBBLcarrymask (FlagLT_UGT)) 13818 // cond: 13819 // result: (MOVLconst [0]) 13820 for { 13821 v_0 := v.Args[0] 13822 if v_0.Op != OpAMD64FlagLT_UGT { 13823 break 13824 } 13825 v.reset(OpAMD64MOVLconst) 13826 v.AuxInt = 0 13827 return true 13828 } 13829 // match: (SBBLcarrymask (FlagGT_ULT)) 13830 // cond: 13831 // result: (MOVLconst [-1]) 13832 for { 13833 v_0 := v.Args[0] 13834 if v_0.Op != OpAMD64FlagGT_ULT { 13835 break 13836 } 13837 v.reset(OpAMD64MOVLconst) 13838 v.AuxInt = -1 13839 return true 13840 } 13841 // match: (SBBLcarrymask (FlagGT_UGT)) 13842 // cond: 13843 // result: (MOVLconst [0]) 13844 for { 13845 v_0 := v.Args[0] 13846 if v_0.Op != OpAMD64FlagGT_UGT { 13847 break 13848 } 13849 v.reset(OpAMD64MOVLconst) 13850 v.AuxInt = 0 13851 return true 13852 } 13853 return false 13854 } 13855 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool { 13856 b := v.Block 13857 _ = b 13858 // match: (SBBQcarrymask (FlagEQ)) 13859 // cond: 13860 // result: (MOVQconst [0]) 13861 for { 13862 v_0 := v.Args[0] 13863 if v_0.Op != OpAMD64FlagEQ { 13864 break 13865 } 13866 v.reset(OpAMD64MOVQconst) 13867 v.AuxInt = 0 13868 return true 13869 } 13870 // match: (SBBQcarrymask (FlagLT_ULT)) 13871 // cond: 13872 // result: (MOVQconst [-1]) 13873 for { 13874 v_0 := v.Args[0] 13875 if v_0.Op != OpAMD64FlagLT_ULT { 13876 break 13877 } 13878 v.reset(OpAMD64MOVQconst) 13879 v.AuxInt = -1 13880 return true 13881 } 13882 // match: (SBBQcarrymask (FlagLT_UGT)) 13883 // cond: 13884 // result: (MOVQconst [0]) 13885 for { 13886 v_0 := v.Args[0] 13887 if v_0.Op != OpAMD64FlagLT_UGT { 13888 break 13889 } 13890 v.reset(OpAMD64MOVQconst) 13891 v.AuxInt = 0 13892 return true 13893 } 13894 // match: (SBBQcarrymask (FlagGT_ULT)) 13895 // cond: 13896 // result: (MOVQconst [-1]) 13897 for { 13898 v_0 := v.Args[0] 13899 if v_0.Op != OpAMD64FlagGT_ULT { 13900 break 13901 } 13902 v.reset(OpAMD64MOVQconst) 13903 v.AuxInt = -1 13904 return true 13905 } 13906 // match: (SBBQcarrymask (FlagGT_UGT)) 13907 // cond: 13908 // result: (MOVQconst [0]) 13909 for { 13910 v_0 := v.Args[0] 13911 if v_0.Op != OpAMD64FlagGT_UGT { 13912 break 13913 } 13914 v.reset(OpAMD64MOVQconst) 13915 v.AuxInt = 0 13916 return true 13917 } 13918 return false 13919 } 13920 func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool { 13921 b := v.Block 13922 _ = b 13923 // match: (SETA (InvertFlags x)) 13924 // cond: 13925 // result: (SETB x) 13926 for { 13927 v_0 := v.Args[0] 13928 if v_0.Op != OpAMD64InvertFlags { 13929 break 13930 } 13931 x := v_0.Args[0] 13932 v.reset(OpAMD64SETB) 13933 v.AddArg(x) 13934 return true 13935 } 13936 // match: (SETA (FlagEQ)) 13937 // cond: 13938 // result: (MOVLconst [0]) 13939 for { 13940 v_0 := v.Args[0] 13941 if v_0.Op != OpAMD64FlagEQ { 13942 break 13943 } 13944 v.reset(OpAMD64MOVLconst) 13945 v.AuxInt = 0 13946 return true 13947 } 13948 // match: (SETA (FlagLT_ULT)) 13949 // cond: 13950 // result: (MOVLconst [0]) 13951 for { 13952 v_0 := v.Args[0] 13953 if v_0.Op != OpAMD64FlagLT_ULT { 13954 break 13955 } 13956 v.reset(OpAMD64MOVLconst) 13957 v.AuxInt = 0 13958 return true 13959 } 13960 // match: (SETA (FlagLT_UGT)) 13961 // cond: 13962 // result: (MOVLconst [1]) 13963 for { 13964 v_0 := v.Args[0] 13965 if v_0.Op != OpAMD64FlagLT_UGT { 13966 break 13967 } 13968 v.reset(OpAMD64MOVLconst) 13969 v.AuxInt = 1 13970 return true 13971 } 13972 // match: (SETA (FlagGT_ULT)) 13973 // cond: 13974 // result: (MOVLconst [0]) 13975 for { 13976 v_0 := v.Args[0] 13977 if v_0.Op != OpAMD64FlagGT_ULT { 13978 break 13979 } 13980 v.reset(OpAMD64MOVLconst) 13981 v.AuxInt = 0 13982 return true 13983 } 13984 // match: (SETA (FlagGT_UGT)) 13985 // cond: 13986 // result: (MOVLconst [1]) 13987 for { 13988 v_0 := v.Args[0] 13989 if v_0.Op != OpAMD64FlagGT_UGT { 13990 break 13991 } 13992 v.reset(OpAMD64MOVLconst) 13993 v.AuxInt = 1 13994 return true 13995 } 13996 return false 13997 } 13998 func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool { 13999 b := v.Block 14000 _ = b 14001 // match: (SETAE (InvertFlags x)) 14002 // cond: 14003 // result: (SETBE x) 14004 for { 14005 v_0 := v.Args[0] 14006 if v_0.Op != OpAMD64InvertFlags { 14007 break 14008 } 14009 x := v_0.Args[0] 14010 v.reset(OpAMD64SETBE) 14011 v.AddArg(x) 14012 return true 14013 } 14014 // match: (SETAE (FlagEQ)) 14015 // cond: 14016 // result: (MOVLconst [1]) 14017 for { 14018 v_0 := v.Args[0] 14019 if v_0.Op != OpAMD64FlagEQ { 14020 break 14021 } 14022 v.reset(OpAMD64MOVLconst) 14023 v.AuxInt = 1 14024 return true 14025 } 14026 // match: (SETAE (FlagLT_ULT)) 14027 // cond: 14028 // result: (MOVLconst [0]) 14029 for { 14030 v_0 := v.Args[0] 14031 if v_0.Op != OpAMD64FlagLT_ULT { 14032 break 14033 } 14034 v.reset(OpAMD64MOVLconst) 14035 v.AuxInt = 0 14036 return true 14037 } 14038 // match: (SETAE (FlagLT_UGT)) 14039 // cond: 14040 // result: (MOVLconst [1]) 14041 for { 14042 v_0 := v.Args[0] 14043 if v_0.Op != OpAMD64FlagLT_UGT { 14044 break 14045 } 14046 v.reset(OpAMD64MOVLconst) 14047 v.AuxInt = 1 14048 return true 14049 } 14050 // match: (SETAE (FlagGT_ULT)) 14051 // cond: 14052 // result: (MOVLconst [0]) 14053 for { 14054 v_0 := v.Args[0] 14055 if v_0.Op != OpAMD64FlagGT_ULT { 14056 break 14057 } 14058 v.reset(OpAMD64MOVLconst) 14059 v.AuxInt = 0 14060 return true 14061 } 14062 // match: (SETAE (FlagGT_UGT)) 14063 // cond: 14064 // result: (MOVLconst [1]) 14065 for { 14066 v_0 := v.Args[0] 14067 if v_0.Op != OpAMD64FlagGT_UGT { 14068 break 14069 } 14070 v.reset(OpAMD64MOVLconst) 14071 v.AuxInt = 1 14072 return true 14073 } 14074 return false 14075 } 14076 func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool { 14077 b := v.Block 14078 _ = b 14079 // match: (SETB (InvertFlags x)) 14080 // cond: 14081 // result: (SETA x) 14082 for { 14083 v_0 := v.Args[0] 14084 if v_0.Op != OpAMD64InvertFlags { 14085 break 14086 } 14087 x := v_0.Args[0] 14088 v.reset(OpAMD64SETA) 14089 v.AddArg(x) 14090 return true 14091 } 14092 // match: (SETB (FlagEQ)) 14093 // cond: 14094 // result: (MOVLconst [0]) 14095 for { 14096 v_0 := v.Args[0] 14097 if v_0.Op != OpAMD64FlagEQ { 14098 break 14099 } 14100 v.reset(OpAMD64MOVLconst) 14101 v.AuxInt = 0 14102 return true 14103 } 14104 // match: (SETB (FlagLT_ULT)) 14105 // cond: 14106 // result: (MOVLconst [1]) 14107 for { 14108 v_0 := v.Args[0] 14109 if v_0.Op != OpAMD64FlagLT_ULT { 14110 break 14111 } 14112 v.reset(OpAMD64MOVLconst) 14113 v.AuxInt = 1 14114 return true 14115 } 14116 // match: (SETB (FlagLT_UGT)) 14117 // cond: 14118 // result: (MOVLconst [0]) 14119 for { 14120 v_0 := v.Args[0] 14121 if v_0.Op != OpAMD64FlagLT_UGT { 14122 break 14123 } 14124 v.reset(OpAMD64MOVLconst) 14125 v.AuxInt = 0 14126 return true 14127 } 14128 // match: (SETB (FlagGT_ULT)) 14129 // cond: 14130 // result: (MOVLconst [1]) 14131 for { 14132 v_0 := v.Args[0] 14133 if v_0.Op != OpAMD64FlagGT_ULT { 14134 break 14135 } 14136 v.reset(OpAMD64MOVLconst) 14137 v.AuxInt = 1 14138 return true 14139 } 14140 // match: (SETB (FlagGT_UGT)) 14141 // cond: 14142 // result: (MOVLconst [0]) 14143 for { 14144 v_0 := v.Args[0] 14145 if v_0.Op != OpAMD64FlagGT_UGT { 14146 break 14147 } 14148 v.reset(OpAMD64MOVLconst) 14149 v.AuxInt = 0 14150 return true 14151 } 14152 return false 14153 } 14154 func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool { 14155 b := v.Block 14156 _ = b 14157 // match: (SETBE (InvertFlags x)) 14158 // cond: 14159 // result: (SETAE x) 14160 for { 14161 v_0 := v.Args[0] 14162 if v_0.Op != OpAMD64InvertFlags { 14163 break 14164 } 14165 x := v_0.Args[0] 14166 v.reset(OpAMD64SETAE) 14167 v.AddArg(x) 14168 return true 14169 } 14170 // match: (SETBE (FlagEQ)) 14171 // cond: 14172 // result: (MOVLconst [1]) 14173 for { 14174 v_0 := v.Args[0] 14175 if v_0.Op != OpAMD64FlagEQ { 14176 break 14177 } 14178 v.reset(OpAMD64MOVLconst) 14179 v.AuxInt = 1 14180 return true 14181 } 14182 // match: (SETBE (FlagLT_ULT)) 14183 // cond: 14184 // result: (MOVLconst [1]) 14185 for { 14186 v_0 := v.Args[0] 14187 if v_0.Op != OpAMD64FlagLT_ULT { 14188 break 14189 } 14190 v.reset(OpAMD64MOVLconst) 14191 v.AuxInt = 1 14192 return true 14193 } 14194 // match: (SETBE (FlagLT_UGT)) 14195 // cond: 14196 // result: (MOVLconst [0]) 14197 for { 14198 v_0 := v.Args[0] 14199 if v_0.Op != OpAMD64FlagLT_UGT { 14200 break 14201 } 14202 v.reset(OpAMD64MOVLconst) 14203 v.AuxInt = 0 14204 return true 14205 } 14206 // match: (SETBE (FlagGT_ULT)) 14207 // cond: 14208 // result: (MOVLconst [1]) 14209 for { 14210 v_0 := v.Args[0] 14211 if v_0.Op != OpAMD64FlagGT_ULT { 14212 break 14213 } 14214 v.reset(OpAMD64MOVLconst) 14215 v.AuxInt = 1 14216 return true 14217 } 14218 // match: (SETBE (FlagGT_UGT)) 14219 // cond: 14220 // result: (MOVLconst [0]) 14221 for { 14222 v_0 := v.Args[0] 14223 if v_0.Op != OpAMD64FlagGT_UGT { 14224 break 14225 } 14226 v.reset(OpAMD64MOVLconst) 14227 v.AuxInt = 0 14228 return true 14229 } 14230 return false 14231 } 14232 func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool { 14233 b := v.Block 14234 _ = b 14235 // match: (SETEQ (InvertFlags x)) 14236 // cond: 14237 // result: (SETEQ x) 14238 for { 14239 v_0 := v.Args[0] 14240 if v_0.Op != OpAMD64InvertFlags { 14241 break 14242 } 14243 x := v_0.Args[0] 14244 v.reset(OpAMD64SETEQ) 14245 v.AddArg(x) 14246 return true 14247 } 14248 // match: (SETEQ (FlagEQ)) 14249 // cond: 14250 // result: (MOVLconst [1]) 14251 for { 14252 v_0 := v.Args[0] 14253 if v_0.Op != OpAMD64FlagEQ { 14254 break 14255 } 14256 v.reset(OpAMD64MOVLconst) 14257 v.AuxInt = 1 14258 return true 14259 } 14260 // match: (SETEQ (FlagLT_ULT)) 14261 // cond: 14262 // result: (MOVLconst [0]) 14263 for { 14264 v_0 := v.Args[0] 14265 if v_0.Op != OpAMD64FlagLT_ULT { 14266 break 14267 } 14268 v.reset(OpAMD64MOVLconst) 14269 v.AuxInt = 0 14270 return true 14271 } 14272 // match: (SETEQ (FlagLT_UGT)) 14273 // cond: 14274 // result: (MOVLconst [0]) 14275 for { 14276 v_0 := v.Args[0] 14277 if v_0.Op != OpAMD64FlagLT_UGT { 14278 break 14279 } 14280 v.reset(OpAMD64MOVLconst) 14281 v.AuxInt = 0 14282 return true 14283 } 14284 // match: (SETEQ (FlagGT_ULT)) 14285 // cond: 14286 // result: (MOVLconst [0]) 14287 for { 14288 v_0 := v.Args[0] 14289 if v_0.Op != OpAMD64FlagGT_ULT { 14290 break 14291 } 14292 v.reset(OpAMD64MOVLconst) 14293 v.AuxInt = 0 14294 return true 14295 } 14296 // match: (SETEQ (FlagGT_UGT)) 14297 // cond: 14298 // result: (MOVLconst [0]) 14299 for { 14300 v_0 := v.Args[0] 14301 if v_0.Op != OpAMD64FlagGT_UGT { 14302 break 14303 } 14304 v.reset(OpAMD64MOVLconst) 14305 v.AuxInt = 0 14306 return true 14307 } 14308 return false 14309 } 14310 func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool { 14311 b := v.Block 14312 _ = b 14313 // match: (SETG (InvertFlags x)) 14314 // cond: 14315 // result: (SETL x) 14316 for { 14317 v_0 := v.Args[0] 14318 if v_0.Op != OpAMD64InvertFlags { 14319 break 14320 } 14321 x := v_0.Args[0] 14322 v.reset(OpAMD64SETL) 14323 v.AddArg(x) 14324 return true 14325 } 14326 // match: (SETG (FlagEQ)) 14327 // cond: 14328 // result: (MOVLconst [0]) 14329 for { 14330 v_0 := v.Args[0] 14331 if v_0.Op != OpAMD64FlagEQ { 14332 break 14333 } 14334 v.reset(OpAMD64MOVLconst) 14335 v.AuxInt = 0 14336 return true 14337 } 14338 // match: (SETG (FlagLT_ULT)) 14339 // cond: 14340 // result: (MOVLconst [0]) 14341 for { 14342 v_0 := v.Args[0] 14343 if v_0.Op != OpAMD64FlagLT_ULT { 14344 break 14345 } 14346 v.reset(OpAMD64MOVLconst) 14347 v.AuxInt = 0 14348 return true 14349 } 14350 // match: (SETG (FlagLT_UGT)) 14351 // cond: 14352 // result: (MOVLconst [0]) 14353 for { 14354 v_0 := v.Args[0] 14355 if v_0.Op != OpAMD64FlagLT_UGT { 14356 break 14357 } 14358 v.reset(OpAMD64MOVLconst) 14359 v.AuxInt = 0 14360 return true 14361 } 14362 // match: (SETG (FlagGT_ULT)) 14363 // cond: 14364 // result: (MOVLconst [1]) 14365 for { 14366 v_0 := v.Args[0] 14367 if v_0.Op != OpAMD64FlagGT_ULT { 14368 break 14369 } 14370 v.reset(OpAMD64MOVLconst) 14371 v.AuxInt = 1 14372 return true 14373 } 14374 // match: (SETG (FlagGT_UGT)) 14375 // cond: 14376 // result: (MOVLconst [1]) 14377 for { 14378 v_0 := v.Args[0] 14379 if v_0.Op != OpAMD64FlagGT_UGT { 14380 break 14381 } 14382 v.reset(OpAMD64MOVLconst) 14383 v.AuxInt = 1 14384 return true 14385 } 14386 return false 14387 } 14388 func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool { 14389 b := v.Block 14390 _ = b 14391 // match: (SETGE (InvertFlags x)) 14392 // cond: 14393 // result: (SETLE x) 14394 for { 14395 v_0 := v.Args[0] 14396 if v_0.Op != OpAMD64InvertFlags { 14397 break 14398 } 14399 x := v_0.Args[0] 14400 v.reset(OpAMD64SETLE) 14401 v.AddArg(x) 14402 return true 14403 } 14404 // match: (SETGE (FlagEQ)) 14405 // cond: 14406 // result: (MOVLconst [1]) 14407 for { 14408 v_0 := v.Args[0] 14409 if v_0.Op != OpAMD64FlagEQ { 14410 break 14411 } 14412 v.reset(OpAMD64MOVLconst) 14413 v.AuxInt = 1 14414 return true 14415 } 14416 // match: (SETGE (FlagLT_ULT)) 14417 // cond: 14418 // result: (MOVLconst [0]) 14419 for { 14420 v_0 := v.Args[0] 14421 if v_0.Op != OpAMD64FlagLT_ULT { 14422 break 14423 } 14424 v.reset(OpAMD64MOVLconst) 14425 v.AuxInt = 0 14426 return true 14427 } 14428 // match: (SETGE (FlagLT_UGT)) 14429 // cond: 14430 // result: (MOVLconst [0]) 14431 for { 14432 v_0 := v.Args[0] 14433 if v_0.Op != OpAMD64FlagLT_UGT { 14434 break 14435 } 14436 v.reset(OpAMD64MOVLconst) 14437 v.AuxInt = 0 14438 return true 14439 } 14440 // match: (SETGE (FlagGT_ULT)) 14441 // cond: 14442 // result: (MOVLconst [1]) 14443 for { 14444 v_0 := v.Args[0] 14445 if v_0.Op != OpAMD64FlagGT_ULT { 14446 break 14447 } 14448 v.reset(OpAMD64MOVLconst) 14449 v.AuxInt = 1 14450 return true 14451 } 14452 // match: (SETGE (FlagGT_UGT)) 14453 // cond: 14454 // result: (MOVLconst [1]) 14455 for { 14456 v_0 := v.Args[0] 14457 if v_0.Op != OpAMD64FlagGT_UGT { 14458 break 14459 } 14460 v.reset(OpAMD64MOVLconst) 14461 v.AuxInt = 1 14462 return true 14463 } 14464 return false 14465 } 14466 func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool { 14467 b := v.Block 14468 _ = b 14469 // match: (SETL (InvertFlags x)) 14470 // cond: 14471 // result: (SETG x) 14472 for { 14473 v_0 := v.Args[0] 14474 if v_0.Op != OpAMD64InvertFlags { 14475 break 14476 } 14477 x := v_0.Args[0] 14478 v.reset(OpAMD64SETG) 14479 v.AddArg(x) 14480 return true 14481 } 14482 // match: (SETL (FlagEQ)) 14483 // cond: 14484 // result: (MOVLconst [0]) 14485 for { 14486 v_0 := v.Args[0] 14487 if v_0.Op != OpAMD64FlagEQ { 14488 break 14489 } 14490 v.reset(OpAMD64MOVLconst) 14491 v.AuxInt = 0 14492 return true 14493 } 14494 // match: (SETL (FlagLT_ULT)) 14495 // cond: 14496 // result: (MOVLconst [1]) 14497 for { 14498 v_0 := v.Args[0] 14499 if v_0.Op != OpAMD64FlagLT_ULT { 14500 break 14501 } 14502 v.reset(OpAMD64MOVLconst) 14503 v.AuxInt = 1 14504 return true 14505 } 14506 // match: (SETL (FlagLT_UGT)) 14507 // cond: 14508 // result: (MOVLconst [1]) 14509 for { 14510 v_0 := v.Args[0] 14511 if v_0.Op != OpAMD64FlagLT_UGT { 14512 break 14513 } 14514 v.reset(OpAMD64MOVLconst) 14515 v.AuxInt = 1 14516 return true 14517 } 14518 // match: (SETL (FlagGT_ULT)) 14519 // cond: 14520 // result: (MOVLconst [0]) 14521 for { 14522 v_0 := v.Args[0] 14523 if v_0.Op != OpAMD64FlagGT_ULT { 14524 break 14525 } 14526 v.reset(OpAMD64MOVLconst) 14527 v.AuxInt = 0 14528 return true 14529 } 14530 // match: (SETL (FlagGT_UGT)) 14531 // cond: 14532 // result: (MOVLconst [0]) 14533 for { 14534 v_0 := v.Args[0] 14535 if v_0.Op != OpAMD64FlagGT_UGT { 14536 break 14537 } 14538 v.reset(OpAMD64MOVLconst) 14539 v.AuxInt = 0 14540 return true 14541 } 14542 return false 14543 } 14544 func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool { 14545 b := v.Block 14546 _ = b 14547 // match: (SETLE (InvertFlags x)) 14548 // cond: 14549 // result: (SETGE x) 14550 for { 14551 v_0 := v.Args[0] 14552 if v_0.Op != OpAMD64InvertFlags { 14553 break 14554 } 14555 x := v_0.Args[0] 14556 v.reset(OpAMD64SETGE) 14557 v.AddArg(x) 14558 return true 14559 } 14560 // match: (SETLE (FlagEQ)) 14561 // cond: 14562 // result: (MOVLconst [1]) 14563 for { 14564 v_0 := v.Args[0] 14565 if v_0.Op != OpAMD64FlagEQ { 14566 break 14567 } 14568 v.reset(OpAMD64MOVLconst) 14569 v.AuxInt = 1 14570 return true 14571 } 14572 // match: (SETLE (FlagLT_ULT)) 14573 // cond: 14574 // result: (MOVLconst [1]) 14575 for { 14576 v_0 := v.Args[0] 14577 if v_0.Op != OpAMD64FlagLT_ULT { 14578 break 14579 } 14580 v.reset(OpAMD64MOVLconst) 14581 v.AuxInt = 1 14582 return true 14583 } 14584 // match: (SETLE (FlagLT_UGT)) 14585 // cond: 14586 // result: (MOVLconst [1]) 14587 for { 14588 v_0 := v.Args[0] 14589 if v_0.Op != OpAMD64FlagLT_UGT { 14590 break 14591 } 14592 v.reset(OpAMD64MOVLconst) 14593 v.AuxInt = 1 14594 return true 14595 } 14596 // match: (SETLE (FlagGT_ULT)) 14597 // cond: 14598 // result: (MOVLconst [0]) 14599 for { 14600 v_0 := v.Args[0] 14601 if v_0.Op != OpAMD64FlagGT_ULT { 14602 break 14603 } 14604 v.reset(OpAMD64MOVLconst) 14605 v.AuxInt = 0 14606 return true 14607 } 14608 // match: (SETLE (FlagGT_UGT)) 14609 // cond: 14610 // result: (MOVLconst [0]) 14611 for { 14612 v_0 := v.Args[0] 14613 if v_0.Op != OpAMD64FlagGT_UGT { 14614 break 14615 } 14616 v.reset(OpAMD64MOVLconst) 14617 v.AuxInt = 0 14618 return true 14619 } 14620 return false 14621 } 14622 func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool { 14623 b := v.Block 14624 _ = b 14625 // match: (SETNE (InvertFlags x)) 14626 // cond: 14627 // result: (SETNE x) 14628 for { 14629 v_0 := v.Args[0] 14630 if v_0.Op != OpAMD64InvertFlags { 14631 break 14632 } 14633 x := v_0.Args[0] 14634 v.reset(OpAMD64SETNE) 14635 v.AddArg(x) 14636 return true 14637 } 14638 // match: (SETNE (FlagEQ)) 14639 // cond: 14640 // result: (MOVLconst [0]) 14641 for { 14642 v_0 := v.Args[0] 14643 if v_0.Op != OpAMD64FlagEQ { 14644 break 14645 } 14646 v.reset(OpAMD64MOVLconst) 14647 v.AuxInt = 0 14648 return true 14649 } 14650 // match: (SETNE (FlagLT_ULT)) 14651 // cond: 14652 // result: (MOVLconst [1]) 14653 for { 14654 v_0 := v.Args[0] 14655 if v_0.Op != OpAMD64FlagLT_ULT { 14656 break 14657 } 14658 v.reset(OpAMD64MOVLconst) 14659 v.AuxInt = 1 14660 return true 14661 } 14662 // match: (SETNE (FlagLT_UGT)) 14663 // cond: 14664 // result: (MOVLconst [1]) 14665 for { 14666 v_0 := v.Args[0] 14667 if v_0.Op != OpAMD64FlagLT_UGT { 14668 break 14669 } 14670 v.reset(OpAMD64MOVLconst) 14671 v.AuxInt = 1 14672 return true 14673 } 14674 // match: (SETNE (FlagGT_ULT)) 14675 // cond: 14676 // result: (MOVLconst [1]) 14677 for { 14678 v_0 := v.Args[0] 14679 if v_0.Op != OpAMD64FlagGT_ULT { 14680 break 14681 } 14682 v.reset(OpAMD64MOVLconst) 14683 v.AuxInt = 1 14684 return true 14685 } 14686 // match: (SETNE (FlagGT_UGT)) 14687 // cond: 14688 // result: (MOVLconst [1]) 14689 for { 14690 v_0 := v.Args[0] 14691 if v_0.Op != OpAMD64FlagGT_UGT { 14692 break 14693 } 14694 v.reset(OpAMD64MOVLconst) 14695 v.AuxInt = 1 14696 return true 14697 } 14698 return false 14699 } 14700 func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { 14701 b := v.Block 14702 _ = b 14703 // match: (SHLL x (MOVQconst [c])) 14704 // cond: 14705 // result: (SHLLconst [c&31] x) 14706 for { 14707 x := v.Args[0] 14708 v_1 := v.Args[1] 14709 if v_1.Op != OpAMD64MOVQconst { 14710 break 14711 } 14712 c := v_1.AuxInt 14713 v.reset(OpAMD64SHLLconst) 14714 v.AuxInt = c & 31 14715 v.AddArg(x) 14716 return true 14717 } 14718 // match: (SHLL x (MOVLconst [c])) 14719 // cond: 14720 // result: (SHLLconst [c&31] x) 14721 for { 14722 x := v.Args[0] 14723 v_1 := v.Args[1] 14724 if v_1.Op != OpAMD64MOVLconst { 14725 break 14726 } 14727 c := v_1.AuxInt 14728 v.reset(OpAMD64SHLLconst) 14729 v.AuxInt = c & 31 14730 v.AddArg(x) 14731 return true 14732 } 14733 // match: (SHLL x (ANDLconst [31] y)) 14734 // cond: 14735 // result: (SHLL x y) 14736 for { 14737 x := v.Args[0] 14738 v_1 := v.Args[1] 14739 if v_1.Op != OpAMD64ANDLconst { 14740 break 14741 } 14742 if v_1.AuxInt != 31 { 14743 break 14744 } 14745 y := v_1.Args[0] 14746 v.reset(OpAMD64SHLL) 14747 v.AddArg(x) 14748 v.AddArg(y) 14749 return true 14750 } 14751 return false 14752 } 14753 func rewriteValueAMD64_OpAMD64SHLLconst(v *Value, config *Config) bool { 14754 b := v.Block 14755 _ = b 14756 // match: (SHLLconst x [0]) 14757 // cond: 14758 // result: x 14759 for { 14760 if v.AuxInt != 0 { 14761 break 14762 } 14763 x := v.Args[0] 14764 v.reset(OpCopy) 14765 v.Type = x.Type 14766 v.AddArg(x) 14767 return true 14768 } 14769 return false 14770 } 14771 func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { 14772 b := v.Block 14773 _ = b 14774 // match: (SHLQ x (MOVQconst [c])) 14775 // cond: 14776 // result: (SHLQconst [c&63] x) 14777 for { 14778 x := v.Args[0] 14779 v_1 := v.Args[1] 14780 if v_1.Op != OpAMD64MOVQconst { 14781 break 14782 } 14783 c := v_1.AuxInt 14784 v.reset(OpAMD64SHLQconst) 14785 v.AuxInt = c & 63 14786 v.AddArg(x) 14787 return true 14788 } 14789 // match: (SHLQ x (MOVLconst [c])) 14790 // cond: 14791 // result: (SHLQconst [c&63] x) 14792 for { 14793 x := v.Args[0] 14794 v_1 := v.Args[1] 14795 if v_1.Op != OpAMD64MOVLconst { 14796 break 14797 } 14798 c := v_1.AuxInt 14799 v.reset(OpAMD64SHLQconst) 14800 v.AuxInt = c & 63 14801 v.AddArg(x) 14802 return true 14803 } 14804 // match: (SHLQ x (ANDQconst [63] y)) 14805 // cond: 14806 // result: (SHLQ x y) 14807 for { 14808 x := v.Args[0] 14809 v_1 := v.Args[1] 14810 if v_1.Op != OpAMD64ANDQconst { 14811 break 14812 } 14813 if v_1.AuxInt != 63 { 14814 break 14815 } 14816 y := v_1.Args[0] 14817 v.reset(OpAMD64SHLQ) 14818 v.AddArg(x) 14819 v.AddArg(y) 14820 return true 14821 } 14822 return false 14823 } 14824 func rewriteValueAMD64_OpAMD64SHLQconst(v *Value, config *Config) bool { 14825 b := v.Block 14826 _ = b 14827 // match: (SHLQconst x [0]) 14828 // cond: 14829 // result: x 14830 for { 14831 if v.AuxInt != 0 { 14832 break 14833 } 14834 x := v.Args[0] 14835 v.reset(OpCopy) 14836 v.Type = x.Type 14837 v.AddArg(x) 14838 return true 14839 } 14840 return false 14841 } 14842 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { 14843 b := v.Block 14844 _ = b 14845 // match: (SHRB x (MOVQconst [c])) 14846 // cond: c&31 < 8 14847 // result: (SHRBconst [c&31] x) 14848 for { 14849 x := v.Args[0] 14850 v_1 := v.Args[1] 14851 if v_1.Op != OpAMD64MOVQconst { 14852 break 14853 } 14854 c := v_1.AuxInt 14855 if !(c&31 < 8) { 14856 break 14857 } 14858 v.reset(OpAMD64SHRBconst) 14859 v.AuxInt = c & 31 14860 v.AddArg(x) 14861 return true 14862 } 14863 // match: (SHRB x (MOVLconst [c])) 14864 // cond: c&31 < 8 14865 // result: (SHRBconst [c&31] x) 14866 for { 14867 x := v.Args[0] 14868 v_1 := v.Args[1] 14869 if v_1.Op != OpAMD64MOVLconst { 14870 break 14871 } 14872 c := v_1.AuxInt 14873 if !(c&31 < 8) { 14874 break 14875 } 14876 v.reset(OpAMD64SHRBconst) 14877 v.AuxInt = c & 31 14878 v.AddArg(x) 14879 return true 14880 } 14881 // match: (SHRB _ (MOVQconst [c])) 14882 // cond: c&31 >= 8 14883 // result: (MOVLconst [0]) 14884 for { 14885 v_1 := v.Args[1] 14886 if v_1.Op != OpAMD64MOVQconst { 14887 break 14888 } 14889 c := v_1.AuxInt 14890 if !(c&31 >= 8) { 14891 break 14892 } 14893 v.reset(OpAMD64MOVLconst) 14894 v.AuxInt = 0 14895 return true 14896 } 14897 // match: (SHRB _ (MOVLconst [c])) 14898 // cond: c&31 >= 8 14899 // result: (MOVLconst [0]) 14900 for { 14901 v_1 := v.Args[1] 14902 if v_1.Op != OpAMD64MOVLconst { 14903 break 14904 } 14905 c := v_1.AuxInt 14906 if !(c&31 >= 8) { 14907 break 14908 } 14909 v.reset(OpAMD64MOVLconst) 14910 v.AuxInt = 0 14911 return true 14912 } 14913 return false 14914 } 14915 func rewriteValueAMD64_OpAMD64SHRBconst(v *Value, config *Config) bool { 14916 b := v.Block 14917 _ = b 14918 // match: (SHRBconst x [0]) 14919 // cond: 14920 // result: x 14921 for { 14922 if v.AuxInt != 0 { 14923 break 14924 } 14925 x := v.Args[0] 14926 v.reset(OpCopy) 14927 v.Type = x.Type 14928 v.AddArg(x) 14929 return true 14930 } 14931 return false 14932 } 14933 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { 14934 b := v.Block 14935 _ = b 14936 // match: (SHRL x (MOVQconst [c])) 14937 // cond: 14938 // result: (SHRLconst [c&31] x) 14939 for { 14940 x := v.Args[0] 14941 v_1 := v.Args[1] 14942 if v_1.Op != OpAMD64MOVQconst { 14943 break 14944 } 14945 c := v_1.AuxInt 14946 v.reset(OpAMD64SHRLconst) 14947 v.AuxInt = c & 31 14948 v.AddArg(x) 14949 return true 14950 } 14951 // match: (SHRL x (MOVLconst [c])) 14952 // cond: 14953 // result: (SHRLconst [c&31] x) 14954 for { 14955 x := v.Args[0] 14956 v_1 := v.Args[1] 14957 if v_1.Op != OpAMD64MOVLconst { 14958 break 14959 } 14960 c := v_1.AuxInt 14961 v.reset(OpAMD64SHRLconst) 14962 v.AuxInt = c & 31 14963 v.AddArg(x) 14964 return true 14965 } 14966 // match: (SHRL x (ANDLconst [31] y)) 14967 // cond: 14968 // result: (SHRL x y) 14969 for { 14970 x := v.Args[0] 14971 v_1 := v.Args[1] 14972 if v_1.Op != OpAMD64ANDLconst { 14973 break 14974 } 14975 if v_1.AuxInt != 31 { 14976 break 14977 } 14978 y := v_1.Args[0] 14979 v.reset(OpAMD64SHRL) 14980 v.AddArg(x) 14981 v.AddArg(y) 14982 return true 14983 } 14984 return false 14985 } 14986 func rewriteValueAMD64_OpAMD64SHRLconst(v *Value, config *Config) bool { 14987 b := v.Block 14988 _ = b 14989 // match: (SHRLconst x [0]) 14990 // cond: 14991 // result: x 14992 for { 14993 if v.AuxInt != 0 { 14994 break 14995 } 14996 x := v.Args[0] 14997 v.reset(OpCopy) 14998 v.Type = x.Type 14999 v.AddArg(x) 15000 return true 15001 } 15002 return false 15003 } 15004 func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { 15005 b := v.Block 15006 _ = b 15007 // match: (SHRQ x (MOVQconst [c])) 15008 // cond: 15009 // result: (SHRQconst [c&63] x) 15010 for { 15011 x := v.Args[0] 15012 v_1 := v.Args[1] 15013 if v_1.Op != OpAMD64MOVQconst { 15014 break 15015 } 15016 c := v_1.AuxInt 15017 v.reset(OpAMD64SHRQconst) 15018 v.AuxInt = c & 63 15019 v.AddArg(x) 15020 return true 15021 } 15022 // match: (SHRQ x (MOVLconst [c])) 15023 // cond: 15024 // result: (SHRQconst [c&63] x) 15025 for { 15026 x := v.Args[0] 15027 v_1 := v.Args[1] 15028 if v_1.Op != OpAMD64MOVLconst { 15029 break 15030 } 15031 c := v_1.AuxInt 15032 v.reset(OpAMD64SHRQconst) 15033 v.AuxInt = c & 63 15034 v.AddArg(x) 15035 return true 15036 } 15037 // match: (SHRQ x (ANDQconst [63] y)) 15038 // cond: 15039 // result: (SHRQ x y) 15040 for { 15041 x := v.Args[0] 15042 v_1 := v.Args[1] 15043 if v_1.Op != OpAMD64ANDQconst { 15044 break 15045 } 15046 if v_1.AuxInt != 63 { 15047 break 15048 } 15049 y := v_1.Args[0] 15050 v.reset(OpAMD64SHRQ) 15051 v.AddArg(x) 15052 v.AddArg(y) 15053 return true 15054 } 15055 return false 15056 } 15057 func rewriteValueAMD64_OpAMD64SHRQconst(v *Value, config *Config) bool { 15058 b := v.Block 15059 _ = b 15060 // match: (SHRQconst x [0]) 15061 // cond: 15062 // result: x 15063 for { 15064 if v.AuxInt != 0 { 15065 break 15066 } 15067 x := v.Args[0] 15068 v.reset(OpCopy) 15069 v.Type = x.Type 15070 v.AddArg(x) 15071 return true 15072 } 15073 return false 15074 } 15075 func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { 15076 b := v.Block 15077 _ = b 15078 // match: (SHRW x (MOVQconst [c])) 15079 // cond: c&31 < 16 15080 // result: (SHRWconst [c&31] x) 15081 for { 15082 x := v.Args[0] 15083 v_1 := v.Args[1] 15084 if v_1.Op != OpAMD64MOVQconst { 15085 break 15086 } 15087 c := v_1.AuxInt 15088 if !(c&31 < 16) { 15089 break 15090 } 15091 v.reset(OpAMD64SHRWconst) 15092 v.AuxInt = c & 31 15093 v.AddArg(x) 15094 return true 15095 } 15096 // match: (SHRW x (MOVLconst [c])) 15097 // cond: c&31 < 16 15098 // result: (SHRWconst [c&31] x) 15099 for { 15100 x := v.Args[0] 15101 v_1 := v.Args[1] 15102 if v_1.Op != OpAMD64MOVLconst { 15103 break 15104 } 15105 c := v_1.AuxInt 15106 if !(c&31 < 16) { 15107 break 15108 } 15109 v.reset(OpAMD64SHRWconst) 15110 v.AuxInt = c & 31 15111 v.AddArg(x) 15112 return true 15113 } 15114 // match: (SHRW _ (MOVQconst [c])) 15115 // cond: c&31 >= 16 15116 // result: (MOVLconst [0]) 15117 for { 15118 v_1 := v.Args[1] 15119 if v_1.Op != OpAMD64MOVQconst { 15120 break 15121 } 15122 c := v_1.AuxInt 15123 if !(c&31 >= 16) { 15124 break 15125 } 15126 v.reset(OpAMD64MOVLconst) 15127 v.AuxInt = 0 15128 return true 15129 } 15130 // match: (SHRW _ (MOVLconst [c])) 15131 // cond: c&31 >= 16 15132 // result: (MOVLconst [0]) 15133 for { 15134 v_1 := v.Args[1] 15135 if v_1.Op != OpAMD64MOVLconst { 15136 break 15137 } 15138 c := v_1.AuxInt 15139 if !(c&31 >= 16) { 15140 break 15141 } 15142 v.reset(OpAMD64MOVLconst) 15143 v.AuxInt = 0 15144 return true 15145 } 15146 return false 15147 } 15148 func rewriteValueAMD64_OpAMD64SHRWconst(v *Value, config *Config) bool { 15149 b := v.Block 15150 _ = b 15151 // match: (SHRWconst x [0]) 15152 // cond: 15153 // result: x 15154 for { 15155 if v.AuxInt != 0 { 15156 break 15157 } 15158 x := v.Args[0] 15159 v.reset(OpCopy) 15160 v.Type = x.Type 15161 v.AddArg(x) 15162 return true 15163 } 15164 return false 15165 } 15166 func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool { 15167 b := v.Block 15168 _ = b 15169 // match: (SUBL x (MOVLconst [c])) 15170 // cond: 15171 // result: (SUBLconst x [c]) 15172 for { 15173 x := v.Args[0] 15174 v_1 := v.Args[1] 15175 if v_1.Op != OpAMD64MOVLconst { 15176 break 15177 } 15178 c := v_1.AuxInt 15179 v.reset(OpAMD64SUBLconst) 15180 v.AuxInt = c 15181 v.AddArg(x) 15182 return true 15183 } 15184 // match: (SUBL (MOVLconst [c]) x) 15185 // cond: 15186 // result: (NEGL (SUBLconst <v.Type> x [c])) 15187 for { 15188 v_0 := v.Args[0] 15189 if v_0.Op != OpAMD64MOVLconst { 15190 break 15191 } 15192 c := v_0.AuxInt 15193 x := v.Args[1] 15194 v.reset(OpAMD64NEGL) 15195 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 15196 v0.AuxInt = c 15197 v0.AddArg(x) 15198 v.AddArg(v0) 15199 return true 15200 } 15201 // match: (SUBL x x) 15202 // cond: 15203 // result: (MOVLconst [0]) 15204 for { 15205 x := v.Args[0] 15206 if x != v.Args[1] { 15207 break 15208 } 15209 v.reset(OpAMD64MOVLconst) 15210 v.AuxInt = 0 15211 return true 15212 } 15213 return false 15214 } 15215 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool { 15216 b := v.Block 15217 _ = b 15218 // match: (SUBLconst [c] x) 15219 // cond: int32(c) == 0 15220 // result: x 15221 for { 15222 c := v.AuxInt 15223 x := v.Args[0] 15224 if !(int32(c) == 0) { 15225 break 15226 } 15227 v.reset(OpCopy) 15228 v.Type = x.Type 15229 v.AddArg(x) 15230 return true 15231 } 15232 // match: (SUBLconst [c] x) 15233 // cond: 15234 // result: (ADDLconst [int64(int32(-c))] x) 15235 for { 15236 c := v.AuxInt 15237 x := v.Args[0] 15238 v.reset(OpAMD64ADDLconst) 15239 v.AuxInt = int64(int32(-c)) 15240 v.AddArg(x) 15241 return true 15242 } 15243 } 15244 func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool { 15245 b := v.Block 15246 _ = b 15247 // match: (SUBQ x (MOVQconst [c])) 15248 // cond: is32Bit(c) 15249 // result: (SUBQconst x [c]) 15250 for { 15251 x := v.Args[0] 15252 v_1 := v.Args[1] 15253 if v_1.Op != OpAMD64MOVQconst { 15254 break 15255 } 15256 c := v_1.AuxInt 15257 if !(is32Bit(c)) { 15258 break 15259 } 15260 v.reset(OpAMD64SUBQconst) 15261 v.AuxInt = c 15262 v.AddArg(x) 15263 return true 15264 } 15265 // match: (SUBQ (MOVQconst [c]) x) 15266 // cond: is32Bit(c) 15267 // result: (NEGQ (SUBQconst <v.Type> x [c])) 15268 for { 15269 v_0 := v.Args[0] 15270 if v_0.Op != OpAMD64MOVQconst { 15271 break 15272 } 15273 c := v_0.AuxInt 15274 x := v.Args[1] 15275 if !(is32Bit(c)) { 15276 break 15277 } 15278 v.reset(OpAMD64NEGQ) 15279 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 15280 v0.AuxInt = c 15281 v0.AddArg(x) 15282 v.AddArg(v0) 15283 return true 15284 } 15285 // match: (SUBQ x x) 15286 // cond: 15287 // result: (MOVQconst [0]) 15288 for { 15289 x := v.Args[0] 15290 if x != v.Args[1] { 15291 break 15292 } 15293 v.reset(OpAMD64MOVQconst) 15294 v.AuxInt = 0 15295 return true 15296 } 15297 return false 15298 } 15299 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { 15300 b := v.Block 15301 _ = b 15302 // match: (SUBQconst [0] x) 15303 // cond: 15304 // result: x 15305 for { 15306 if v.AuxInt != 0 { 15307 break 15308 } 15309 x := v.Args[0] 15310 v.reset(OpCopy) 15311 v.Type = x.Type 15312 v.AddArg(x) 15313 return true 15314 } 15315 // match: (SUBQconst [c] x) 15316 // cond: c != -(1<<31) 15317 // result: (ADDQconst [-c] x) 15318 for { 15319 c := v.AuxInt 15320 x := v.Args[0] 15321 if !(c != -(1 << 31)) { 15322 break 15323 } 15324 v.reset(OpAMD64ADDQconst) 15325 v.AuxInt = -c 15326 v.AddArg(x) 15327 return true 15328 } 15329 // match: (SUBQconst (MOVQconst [d]) [c]) 15330 // cond: 15331 // result: (MOVQconst [d-c]) 15332 for { 15333 c := v.AuxInt 15334 v_0 := v.Args[0] 15335 if v_0.Op != OpAMD64MOVQconst { 15336 break 15337 } 15338 d := v_0.AuxInt 15339 v.reset(OpAMD64MOVQconst) 15340 v.AuxInt = d - c 15341 return true 15342 } 15343 // match: (SUBQconst (SUBQconst x [d]) [c]) 15344 // cond: is32Bit(-c-d) 15345 // result: (ADDQconst [-c-d] x) 15346 for { 15347 c := v.AuxInt 15348 v_0 := v.Args[0] 15349 if v_0.Op != OpAMD64SUBQconst { 15350 break 15351 } 15352 d := v_0.AuxInt 15353 x := v_0.Args[0] 15354 if !(is32Bit(-c - d)) { 15355 break 15356 } 15357 v.reset(OpAMD64ADDQconst) 15358 v.AuxInt = -c - d 15359 v.AddArg(x) 15360 return true 15361 } 15362 return false 15363 } 15364 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value, config *Config) bool { 15365 b := v.Block 15366 _ = b 15367 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 15368 // cond: is32Bit(off1+off2) 15369 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 15370 for { 15371 off1 := v.AuxInt 15372 sym := v.Aux 15373 val := v.Args[0] 15374 v_1 := v.Args[1] 15375 if v_1.Op != OpAMD64ADDQconst { 15376 break 15377 } 15378 off2 := v_1.AuxInt 15379 ptr := v_1.Args[0] 15380 mem := v.Args[2] 15381 if !(is32Bit(off1 + off2)) { 15382 break 15383 } 15384 v.reset(OpAMD64XADDLlock) 15385 v.AuxInt = off1 + off2 15386 v.Aux = sym 15387 v.AddArg(val) 15388 v.AddArg(ptr) 15389 v.AddArg(mem) 15390 return true 15391 } 15392 return false 15393 } 15394 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value, config *Config) bool { 15395 b := v.Block 15396 _ = b 15397 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 15398 // cond: is32Bit(off1+off2) 15399 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 15400 for { 15401 off1 := v.AuxInt 15402 sym := v.Aux 15403 val := v.Args[0] 15404 v_1 := v.Args[1] 15405 if v_1.Op != OpAMD64ADDQconst { 15406 break 15407 } 15408 off2 := v_1.AuxInt 15409 ptr := v_1.Args[0] 15410 mem := v.Args[2] 15411 if !(is32Bit(off1 + off2)) { 15412 break 15413 } 15414 v.reset(OpAMD64XADDQlock) 15415 v.AuxInt = off1 + off2 15416 v.Aux = sym 15417 v.AddArg(val) 15418 v.AddArg(ptr) 15419 v.AddArg(mem) 15420 return true 15421 } 15422 return false 15423 } 15424 func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool { 15425 b := v.Block 15426 _ = b 15427 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 15428 // cond: is32Bit(off1+off2) 15429 // result: (XCHGL [off1+off2] {sym} val ptr mem) 15430 for { 15431 off1 := v.AuxInt 15432 sym := v.Aux 15433 val := v.Args[0] 15434 v_1 := v.Args[1] 15435 if v_1.Op != OpAMD64ADDQconst { 15436 break 15437 } 15438 off2 := v_1.AuxInt 15439 ptr := v_1.Args[0] 15440 mem := v.Args[2] 15441 if !(is32Bit(off1 + off2)) { 15442 break 15443 } 15444 v.reset(OpAMD64XCHGL) 15445 v.AuxInt = off1 + off2 15446 v.Aux = sym 15447 v.AddArg(val) 15448 v.AddArg(ptr) 15449 v.AddArg(mem) 15450 return true 15451 } 15452 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 15453 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 15454 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 15455 for { 15456 off1 := v.AuxInt 15457 sym1 := v.Aux 15458 val := v.Args[0] 15459 v_1 := v.Args[1] 15460 if v_1.Op != OpAMD64LEAQ { 15461 break 15462 } 15463 off2 := v_1.AuxInt 15464 sym2 := v_1.Aux 15465 ptr := v_1.Args[0] 15466 mem := v.Args[2] 15467 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 15468 break 15469 } 15470 v.reset(OpAMD64XCHGL) 15471 v.AuxInt = off1 + off2 15472 v.Aux = mergeSym(sym1, sym2) 15473 v.AddArg(val) 15474 v.AddArg(ptr) 15475 v.AddArg(mem) 15476 return true 15477 } 15478 return false 15479 } 15480 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool { 15481 b := v.Block 15482 _ = b 15483 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 15484 // cond: is32Bit(off1+off2) 15485 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 15486 for { 15487 off1 := v.AuxInt 15488 sym := v.Aux 15489 val := v.Args[0] 15490 v_1 := v.Args[1] 15491 if v_1.Op != OpAMD64ADDQconst { 15492 break 15493 } 15494 off2 := v_1.AuxInt 15495 ptr := v_1.Args[0] 15496 mem := v.Args[2] 15497 if !(is32Bit(off1 + off2)) { 15498 break 15499 } 15500 v.reset(OpAMD64XCHGQ) 15501 v.AuxInt = off1 + off2 15502 v.Aux = sym 15503 v.AddArg(val) 15504 v.AddArg(ptr) 15505 v.AddArg(mem) 15506 return true 15507 } 15508 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 15509 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 15510 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 15511 for { 15512 off1 := v.AuxInt 15513 sym1 := v.Aux 15514 val := v.Args[0] 15515 v_1 := v.Args[1] 15516 if v_1.Op != OpAMD64LEAQ { 15517 break 15518 } 15519 off2 := v_1.AuxInt 15520 sym2 := v_1.Aux 15521 ptr := v_1.Args[0] 15522 mem := v.Args[2] 15523 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 15524 break 15525 } 15526 v.reset(OpAMD64XCHGQ) 15527 v.AuxInt = off1 + off2 15528 v.Aux = mergeSym(sym1, sym2) 15529 v.AddArg(val) 15530 v.AddArg(ptr) 15531 v.AddArg(mem) 15532 return true 15533 } 15534 return false 15535 } 15536 func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool { 15537 b := v.Block 15538 _ = b 15539 // match: (XORL x (MOVLconst [c])) 15540 // cond: 15541 // result: (XORLconst [c] x) 15542 for { 15543 x := v.Args[0] 15544 v_1 := v.Args[1] 15545 if v_1.Op != OpAMD64MOVLconst { 15546 break 15547 } 15548 c := v_1.AuxInt 15549 v.reset(OpAMD64XORLconst) 15550 v.AuxInt = c 15551 v.AddArg(x) 15552 return true 15553 } 15554 // match: (XORL (MOVLconst [c]) x) 15555 // cond: 15556 // result: (XORLconst [c] x) 15557 for { 15558 v_0 := v.Args[0] 15559 if v_0.Op != OpAMD64MOVLconst { 15560 break 15561 } 15562 c := v_0.AuxInt 15563 x := v.Args[1] 15564 v.reset(OpAMD64XORLconst) 15565 v.AuxInt = c 15566 v.AddArg(x) 15567 return true 15568 } 15569 // match: (XORL (SHLLconst x [c]) (SHRLconst x [32-c])) 15570 // cond: 15571 // result: (ROLLconst x [ c]) 15572 for { 15573 v_0 := v.Args[0] 15574 if v_0.Op != OpAMD64SHLLconst { 15575 break 15576 } 15577 c := v_0.AuxInt 15578 x := v_0.Args[0] 15579 v_1 := v.Args[1] 15580 if v_1.Op != OpAMD64SHRLconst { 15581 break 15582 } 15583 if v_1.AuxInt != 32-c { 15584 break 15585 } 15586 if x != v_1.Args[0] { 15587 break 15588 } 15589 v.reset(OpAMD64ROLLconst) 15590 v.AuxInt = c 15591 v.AddArg(x) 15592 return true 15593 } 15594 // match: (XORL (SHRLconst x [c]) (SHLLconst x [32-c])) 15595 // cond: 15596 // result: (ROLLconst x [32-c]) 15597 for { 15598 v_0 := v.Args[0] 15599 if v_0.Op != OpAMD64SHRLconst { 15600 break 15601 } 15602 c := v_0.AuxInt 15603 x := v_0.Args[0] 15604 v_1 := v.Args[1] 15605 if v_1.Op != OpAMD64SHLLconst { 15606 break 15607 } 15608 if v_1.AuxInt != 32-c { 15609 break 15610 } 15611 if x != v_1.Args[0] { 15612 break 15613 } 15614 v.reset(OpAMD64ROLLconst) 15615 v.AuxInt = 32 - c 15616 v.AddArg(x) 15617 return true 15618 } 15619 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [16-c])) 15620 // cond: c < 16 && t.Size() == 2 15621 // result: (ROLWconst x [ c]) 15622 for { 15623 t := v.Type 15624 v_0 := v.Args[0] 15625 if v_0.Op != OpAMD64SHLLconst { 15626 break 15627 } 15628 c := v_0.AuxInt 15629 x := v_0.Args[0] 15630 v_1 := v.Args[1] 15631 if v_1.Op != OpAMD64SHRWconst { 15632 break 15633 } 15634 if v_1.AuxInt != 16-c { 15635 break 15636 } 15637 if x != v_1.Args[0] { 15638 break 15639 } 15640 if !(c < 16 && t.Size() == 2) { 15641 break 15642 } 15643 v.reset(OpAMD64ROLWconst) 15644 v.AuxInt = c 15645 v.AddArg(x) 15646 return true 15647 } 15648 // match: (XORL <t> (SHRWconst x [c]) (SHLLconst x [16-c])) 15649 // cond: c > 0 && t.Size() == 2 15650 // result: (ROLWconst x [16-c]) 15651 for { 15652 t := v.Type 15653 v_0 := v.Args[0] 15654 if v_0.Op != OpAMD64SHRWconst { 15655 break 15656 } 15657 c := v_0.AuxInt 15658 x := v_0.Args[0] 15659 v_1 := v.Args[1] 15660 if v_1.Op != OpAMD64SHLLconst { 15661 break 15662 } 15663 if v_1.AuxInt != 16-c { 15664 break 15665 } 15666 if x != v_1.Args[0] { 15667 break 15668 } 15669 if !(c > 0 && t.Size() == 2) { 15670 break 15671 } 15672 v.reset(OpAMD64ROLWconst) 15673 v.AuxInt = 16 - c 15674 v.AddArg(x) 15675 return true 15676 } 15677 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [ 8-c])) 15678 // cond: c < 8 && t.Size() == 1 15679 // result: (ROLBconst x [ c]) 15680 for { 15681 t := v.Type 15682 v_0 := v.Args[0] 15683 if v_0.Op != OpAMD64SHLLconst { 15684 break 15685 } 15686 c := v_0.AuxInt 15687 x := v_0.Args[0] 15688 v_1 := v.Args[1] 15689 if v_1.Op != OpAMD64SHRBconst { 15690 break 15691 } 15692 if v_1.AuxInt != 8-c { 15693 break 15694 } 15695 if x != v_1.Args[0] { 15696 break 15697 } 15698 if !(c < 8 && t.Size() == 1) { 15699 break 15700 } 15701 v.reset(OpAMD64ROLBconst) 15702 v.AuxInt = c 15703 v.AddArg(x) 15704 return true 15705 } 15706 // match: (XORL <t> (SHRBconst x [c]) (SHLLconst x [ 8-c])) 15707 // cond: c > 0 && t.Size() == 1 15708 // result: (ROLBconst x [ 8-c]) 15709 for { 15710 t := v.Type 15711 v_0 := v.Args[0] 15712 if v_0.Op != OpAMD64SHRBconst { 15713 break 15714 } 15715 c := v_0.AuxInt 15716 x := v_0.Args[0] 15717 v_1 := v.Args[1] 15718 if v_1.Op != OpAMD64SHLLconst { 15719 break 15720 } 15721 if v_1.AuxInt != 8-c { 15722 break 15723 } 15724 if x != v_1.Args[0] { 15725 break 15726 } 15727 if !(c > 0 && t.Size() == 1) { 15728 break 15729 } 15730 v.reset(OpAMD64ROLBconst) 15731 v.AuxInt = 8 - c 15732 v.AddArg(x) 15733 return true 15734 } 15735 // match: (XORL x x) 15736 // cond: 15737 // result: (MOVLconst [0]) 15738 for { 15739 x := v.Args[0] 15740 if x != v.Args[1] { 15741 break 15742 } 15743 v.reset(OpAMD64MOVLconst) 15744 v.AuxInt = 0 15745 return true 15746 } 15747 return false 15748 } 15749 func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool { 15750 b := v.Block 15751 _ = b 15752 // match: (XORLconst [c] (XORLconst [d] x)) 15753 // cond: 15754 // result: (XORLconst [c ^ d] x) 15755 for { 15756 c := v.AuxInt 15757 v_0 := v.Args[0] 15758 if v_0.Op != OpAMD64XORLconst { 15759 break 15760 } 15761 d := v_0.AuxInt 15762 x := v_0.Args[0] 15763 v.reset(OpAMD64XORLconst) 15764 v.AuxInt = c ^ d 15765 v.AddArg(x) 15766 return true 15767 } 15768 // match: (XORLconst [c] x) 15769 // cond: int32(c)==0 15770 // result: x 15771 for { 15772 c := v.AuxInt 15773 x := v.Args[0] 15774 if !(int32(c) == 0) { 15775 break 15776 } 15777 v.reset(OpCopy) 15778 v.Type = x.Type 15779 v.AddArg(x) 15780 return true 15781 } 15782 // match: (XORLconst [c] (MOVLconst [d])) 15783 // cond: 15784 // result: (MOVLconst [c^d]) 15785 for { 15786 c := v.AuxInt 15787 v_0 := v.Args[0] 15788 if v_0.Op != OpAMD64MOVLconst { 15789 break 15790 } 15791 d := v_0.AuxInt 15792 v.reset(OpAMD64MOVLconst) 15793 v.AuxInt = c ^ d 15794 return true 15795 } 15796 return false 15797 } 15798 func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool { 15799 b := v.Block 15800 _ = b 15801 // match: (XORQ x (MOVQconst [c])) 15802 // cond: is32Bit(c) 15803 // result: (XORQconst [c] x) 15804 for { 15805 x := v.Args[0] 15806 v_1 := v.Args[1] 15807 if v_1.Op != OpAMD64MOVQconst { 15808 break 15809 } 15810 c := v_1.AuxInt 15811 if !(is32Bit(c)) { 15812 break 15813 } 15814 v.reset(OpAMD64XORQconst) 15815 v.AuxInt = c 15816 v.AddArg(x) 15817 return true 15818 } 15819 // match: (XORQ (MOVQconst [c]) x) 15820 // cond: is32Bit(c) 15821 // result: (XORQconst [c] x) 15822 for { 15823 v_0 := v.Args[0] 15824 if v_0.Op != OpAMD64MOVQconst { 15825 break 15826 } 15827 c := v_0.AuxInt 15828 x := v.Args[1] 15829 if !(is32Bit(c)) { 15830 break 15831 } 15832 v.reset(OpAMD64XORQconst) 15833 v.AuxInt = c 15834 v.AddArg(x) 15835 return true 15836 } 15837 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [64-c])) 15838 // cond: 15839 // result: (ROLQconst x [ c]) 15840 for { 15841 v_0 := v.Args[0] 15842 if v_0.Op != OpAMD64SHLQconst { 15843 break 15844 } 15845 c := v_0.AuxInt 15846 x := v_0.Args[0] 15847 v_1 := v.Args[1] 15848 if v_1.Op != OpAMD64SHRQconst { 15849 break 15850 } 15851 if v_1.AuxInt != 64-c { 15852 break 15853 } 15854 if x != v_1.Args[0] { 15855 break 15856 } 15857 v.reset(OpAMD64ROLQconst) 15858 v.AuxInt = c 15859 v.AddArg(x) 15860 return true 15861 } 15862 // match: (XORQ (SHRQconst x [c]) (SHLQconst x [64-c])) 15863 // cond: 15864 // result: (ROLQconst x [64-c]) 15865 for { 15866 v_0 := v.Args[0] 15867 if v_0.Op != OpAMD64SHRQconst { 15868 break 15869 } 15870 c := v_0.AuxInt 15871 x := v_0.Args[0] 15872 v_1 := v.Args[1] 15873 if v_1.Op != OpAMD64SHLQconst { 15874 break 15875 } 15876 if v_1.AuxInt != 64-c { 15877 break 15878 } 15879 if x != v_1.Args[0] { 15880 break 15881 } 15882 v.reset(OpAMD64ROLQconst) 15883 v.AuxInt = 64 - c 15884 v.AddArg(x) 15885 return true 15886 } 15887 // match: (XORQ x x) 15888 // cond: 15889 // result: (MOVQconst [0]) 15890 for { 15891 x := v.Args[0] 15892 if x != v.Args[1] { 15893 break 15894 } 15895 v.reset(OpAMD64MOVQconst) 15896 v.AuxInt = 0 15897 return true 15898 } 15899 return false 15900 } 15901 func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool { 15902 b := v.Block 15903 _ = b 15904 // match: (XORQconst [c] (XORQconst [d] x)) 15905 // cond: 15906 // result: (XORQconst [c ^ d] x) 15907 for { 15908 c := v.AuxInt 15909 v_0 := v.Args[0] 15910 if v_0.Op != OpAMD64XORQconst { 15911 break 15912 } 15913 d := v_0.AuxInt 15914 x := v_0.Args[0] 15915 v.reset(OpAMD64XORQconst) 15916 v.AuxInt = c ^ d 15917 v.AddArg(x) 15918 return true 15919 } 15920 // match: (XORQconst [0] x) 15921 // cond: 15922 // result: x 15923 for { 15924 if v.AuxInt != 0 { 15925 break 15926 } 15927 x := v.Args[0] 15928 v.reset(OpCopy) 15929 v.Type = x.Type 15930 v.AddArg(x) 15931 return true 15932 } 15933 // match: (XORQconst [c] (MOVQconst [d])) 15934 // cond: 15935 // result: (MOVQconst [c^d]) 15936 for { 15937 c := v.AuxInt 15938 v_0 := v.Args[0] 15939 if v_0.Op != OpAMD64MOVQconst { 15940 break 15941 } 15942 d := v_0.AuxInt 15943 v.reset(OpAMD64MOVQconst) 15944 v.AuxInt = c ^ d 15945 return true 15946 } 15947 return false 15948 } 15949 func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { 15950 b := v.Block 15951 _ = b 15952 // match: (Add16 x y) 15953 // cond: 15954 // result: (ADDL x y) 15955 for { 15956 x := v.Args[0] 15957 y := v.Args[1] 15958 v.reset(OpAMD64ADDL) 15959 v.AddArg(x) 15960 v.AddArg(y) 15961 return true 15962 } 15963 } 15964 func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool { 15965 b := v.Block 15966 _ = b 15967 // match: (Add32 x y) 15968 // cond: 15969 // result: (ADDL x y) 15970 for { 15971 x := v.Args[0] 15972 y := v.Args[1] 15973 v.reset(OpAMD64ADDL) 15974 v.AddArg(x) 15975 v.AddArg(y) 15976 return true 15977 } 15978 } 15979 func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool { 15980 b := v.Block 15981 _ = b 15982 // match: (Add32F x y) 15983 // cond: 15984 // result: (ADDSS x y) 15985 for { 15986 x := v.Args[0] 15987 y := v.Args[1] 15988 v.reset(OpAMD64ADDSS) 15989 v.AddArg(x) 15990 v.AddArg(y) 15991 return true 15992 } 15993 } 15994 func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool { 15995 b := v.Block 15996 _ = b 15997 // match: (Add64 x y) 15998 // cond: 15999 // result: (ADDQ x y) 16000 for { 16001 x := v.Args[0] 16002 y := v.Args[1] 16003 v.reset(OpAMD64ADDQ) 16004 v.AddArg(x) 16005 v.AddArg(y) 16006 return true 16007 } 16008 } 16009 func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool { 16010 b := v.Block 16011 _ = b 16012 // match: (Add64F x y) 16013 // cond: 16014 // result: (ADDSD x y) 16015 for { 16016 x := v.Args[0] 16017 y := v.Args[1] 16018 v.reset(OpAMD64ADDSD) 16019 v.AddArg(x) 16020 v.AddArg(y) 16021 return true 16022 } 16023 } 16024 func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool { 16025 b := v.Block 16026 _ = b 16027 // match: (Add8 x y) 16028 // cond: 16029 // result: (ADDL x y) 16030 for { 16031 x := v.Args[0] 16032 y := v.Args[1] 16033 v.reset(OpAMD64ADDL) 16034 v.AddArg(x) 16035 v.AddArg(y) 16036 return true 16037 } 16038 } 16039 func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool { 16040 b := v.Block 16041 _ = b 16042 // match: (AddPtr x y) 16043 // cond: config.PtrSize == 8 16044 // result: (ADDQ x y) 16045 for { 16046 x := v.Args[0] 16047 y := v.Args[1] 16048 if !(config.PtrSize == 8) { 16049 break 16050 } 16051 v.reset(OpAMD64ADDQ) 16052 v.AddArg(x) 16053 v.AddArg(y) 16054 return true 16055 } 16056 // match: (AddPtr x y) 16057 // cond: config.PtrSize == 4 16058 // result: (ADDL x y) 16059 for { 16060 x := v.Args[0] 16061 y := v.Args[1] 16062 if !(config.PtrSize == 4) { 16063 break 16064 } 16065 v.reset(OpAMD64ADDL) 16066 v.AddArg(x) 16067 v.AddArg(y) 16068 return true 16069 } 16070 return false 16071 } 16072 func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool { 16073 b := v.Block 16074 _ = b 16075 // match: (Addr {sym} base) 16076 // cond: config.PtrSize == 8 16077 // result: (LEAQ {sym} base) 16078 for { 16079 sym := v.Aux 16080 base := v.Args[0] 16081 if !(config.PtrSize == 8) { 16082 break 16083 } 16084 v.reset(OpAMD64LEAQ) 16085 v.Aux = sym 16086 v.AddArg(base) 16087 return true 16088 } 16089 // match: (Addr {sym} base) 16090 // cond: config.PtrSize == 4 16091 // result: (LEAL {sym} base) 16092 for { 16093 sym := v.Aux 16094 base := v.Args[0] 16095 if !(config.PtrSize == 4) { 16096 break 16097 } 16098 v.reset(OpAMD64LEAL) 16099 v.Aux = sym 16100 v.AddArg(base) 16101 return true 16102 } 16103 return false 16104 } 16105 func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool { 16106 b := v.Block 16107 _ = b 16108 // match: (And16 x y) 16109 // cond: 16110 // result: (ANDL x y) 16111 for { 16112 x := v.Args[0] 16113 y := v.Args[1] 16114 v.reset(OpAMD64ANDL) 16115 v.AddArg(x) 16116 v.AddArg(y) 16117 return true 16118 } 16119 } 16120 func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool { 16121 b := v.Block 16122 _ = b 16123 // match: (And32 x y) 16124 // cond: 16125 // result: (ANDL x y) 16126 for { 16127 x := v.Args[0] 16128 y := v.Args[1] 16129 v.reset(OpAMD64ANDL) 16130 v.AddArg(x) 16131 v.AddArg(y) 16132 return true 16133 } 16134 } 16135 func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool { 16136 b := v.Block 16137 _ = b 16138 // match: (And64 x y) 16139 // cond: 16140 // result: (ANDQ x y) 16141 for { 16142 x := v.Args[0] 16143 y := v.Args[1] 16144 v.reset(OpAMD64ANDQ) 16145 v.AddArg(x) 16146 v.AddArg(y) 16147 return true 16148 } 16149 } 16150 func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { 16151 b := v.Block 16152 _ = b 16153 // match: (And8 x y) 16154 // cond: 16155 // result: (ANDL x y) 16156 for { 16157 x := v.Args[0] 16158 y := v.Args[1] 16159 v.reset(OpAMD64ANDL) 16160 v.AddArg(x) 16161 v.AddArg(y) 16162 return true 16163 } 16164 } 16165 func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool { 16166 b := v.Block 16167 _ = b 16168 // match: (AndB x y) 16169 // cond: 16170 // result: (ANDL x y) 16171 for { 16172 x := v.Args[0] 16173 y := v.Args[1] 16174 v.reset(OpAMD64ANDL) 16175 v.AddArg(x) 16176 v.AddArg(y) 16177 return true 16178 } 16179 } 16180 func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool { 16181 b := v.Block 16182 _ = b 16183 // match: (AtomicAdd32 ptr val mem) 16184 // cond: 16185 // result: (AddTupleFirst32 (XADDLlock val ptr mem) val) 16186 for { 16187 ptr := v.Args[0] 16188 val := v.Args[1] 16189 mem := v.Args[2] 16190 v.reset(OpAMD64AddTupleFirst32) 16191 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem)) 16192 v0.AddArg(val) 16193 v0.AddArg(ptr) 16194 v0.AddArg(mem) 16195 v.AddArg(v0) 16196 v.AddArg(val) 16197 return true 16198 } 16199 } 16200 func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool { 16201 b := v.Block 16202 _ = b 16203 // match: (AtomicAdd64 ptr val mem) 16204 // cond: 16205 // result: (AddTupleFirst64 (XADDQlock val ptr mem) val) 16206 for { 16207 ptr := v.Args[0] 16208 val := v.Args[1] 16209 mem := v.Args[2] 16210 v.reset(OpAMD64AddTupleFirst64) 16211 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem)) 16212 v0.AddArg(val) 16213 v0.AddArg(ptr) 16214 v0.AddArg(mem) 16215 v.AddArg(v0) 16216 v.AddArg(val) 16217 return true 16218 } 16219 } 16220 func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool { 16221 b := v.Block 16222 _ = b 16223 // match: (AtomicAnd8 ptr val mem) 16224 // cond: 16225 // result: (ANDBlock ptr val mem) 16226 for { 16227 ptr := v.Args[0] 16228 val := v.Args[1] 16229 mem := v.Args[2] 16230 v.reset(OpAMD64ANDBlock) 16231 v.AddArg(ptr) 16232 v.AddArg(val) 16233 v.AddArg(mem) 16234 return true 16235 } 16236 } 16237 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool { 16238 b := v.Block 16239 _ = b 16240 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 16241 // cond: 16242 // result: (CMPXCHGLlock ptr old new_ mem) 16243 for { 16244 ptr := v.Args[0] 16245 old := v.Args[1] 16246 new_ := v.Args[2] 16247 mem := v.Args[3] 16248 v.reset(OpAMD64CMPXCHGLlock) 16249 v.AddArg(ptr) 16250 v.AddArg(old) 16251 v.AddArg(new_) 16252 v.AddArg(mem) 16253 return true 16254 } 16255 } 16256 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool { 16257 b := v.Block 16258 _ = b 16259 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 16260 // cond: 16261 // result: (CMPXCHGQlock ptr old new_ mem) 16262 for { 16263 ptr := v.Args[0] 16264 old := v.Args[1] 16265 new_ := v.Args[2] 16266 mem := v.Args[3] 16267 v.reset(OpAMD64CMPXCHGQlock) 16268 v.AddArg(ptr) 16269 v.AddArg(old) 16270 v.AddArg(new_) 16271 v.AddArg(mem) 16272 return true 16273 } 16274 } 16275 func rewriteValueAMD64_OpAtomicExchange32(v *Value, config *Config) bool { 16276 b := v.Block 16277 _ = b 16278 // match: (AtomicExchange32 ptr val mem) 16279 // cond: 16280 // result: (XCHGL val ptr mem) 16281 for { 16282 ptr := v.Args[0] 16283 val := v.Args[1] 16284 mem := v.Args[2] 16285 v.reset(OpAMD64XCHGL) 16286 v.AddArg(val) 16287 v.AddArg(ptr) 16288 v.AddArg(mem) 16289 return true 16290 } 16291 } 16292 func rewriteValueAMD64_OpAtomicExchange64(v *Value, config *Config) bool { 16293 b := v.Block 16294 _ = b 16295 // match: (AtomicExchange64 ptr val mem) 16296 // cond: 16297 // result: (XCHGQ val ptr mem) 16298 for { 16299 ptr := v.Args[0] 16300 val := v.Args[1] 16301 mem := v.Args[2] 16302 v.reset(OpAMD64XCHGQ) 16303 v.AddArg(val) 16304 v.AddArg(ptr) 16305 v.AddArg(mem) 16306 return true 16307 } 16308 } 16309 func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool { 16310 b := v.Block 16311 _ = b 16312 // match: (AtomicLoad32 ptr mem) 16313 // cond: 16314 // result: (MOVLatomicload ptr mem) 16315 for { 16316 ptr := v.Args[0] 16317 mem := v.Args[1] 16318 v.reset(OpAMD64MOVLatomicload) 16319 v.AddArg(ptr) 16320 v.AddArg(mem) 16321 return true 16322 } 16323 } 16324 func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool { 16325 b := v.Block 16326 _ = b 16327 // match: (AtomicLoad64 ptr mem) 16328 // cond: 16329 // result: (MOVQatomicload ptr mem) 16330 for { 16331 ptr := v.Args[0] 16332 mem := v.Args[1] 16333 v.reset(OpAMD64MOVQatomicload) 16334 v.AddArg(ptr) 16335 v.AddArg(mem) 16336 return true 16337 } 16338 } 16339 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool { 16340 b := v.Block 16341 _ = b 16342 // match: (AtomicLoadPtr ptr mem) 16343 // cond: config.PtrSize == 8 16344 // result: (MOVQatomicload ptr mem) 16345 for { 16346 ptr := v.Args[0] 16347 mem := v.Args[1] 16348 if !(config.PtrSize == 8) { 16349 break 16350 } 16351 v.reset(OpAMD64MOVQatomicload) 16352 v.AddArg(ptr) 16353 v.AddArg(mem) 16354 return true 16355 } 16356 // match: (AtomicLoadPtr ptr mem) 16357 // cond: config.PtrSize == 4 16358 // result: (MOVLatomicload ptr mem) 16359 for { 16360 ptr := v.Args[0] 16361 mem := v.Args[1] 16362 if !(config.PtrSize == 4) { 16363 break 16364 } 16365 v.reset(OpAMD64MOVLatomicload) 16366 v.AddArg(ptr) 16367 v.AddArg(mem) 16368 return true 16369 } 16370 return false 16371 } 16372 func rewriteValueAMD64_OpAtomicOr8(v *Value, config *Config) bool { 16373 b := v.Block 16374 _ = b 16375 // match: (AtomicOr8 ptr val mem) 16376 // cond: 16377 // result: (ORBlock ptr val mem) 16378 for { 16379 ptr := v.Args[0] 16380 val := v.Args[1] 16381 mem := v.Args[2] 16382 v.reset(OpAMD64ORBlock) 16383 v.AddArg(ptr) 16384 v.AddArg(val) 16385 v.AddArg(mem) 16386 return true 16387 } 16388 } 16389 func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool { 16390 b := v.Block 16391 _ = b 16392 // match: (AtomicStore32 ptr val mem) 16393 // cond: 16394 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem)) 16395 for { 16396 ptr := v.Args[0] 16397 val := v.Args[1] 16398 mem := v.Args[2] 16399 v.reset(OpSelect1) 16400 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem)) 16401 v0.AddArg(val) 16402 v0.AddArg(ptr) 16403 v0.AddArg(mem) 16404 v.AddArg(v0) 16405 return true 16406 } 16407 } 16408 func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool { 16409 b := v.Block 16410 _ = b 16411 // match: (AtomicStore64 ptr val mem) 16412 // cond: 16413 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem)) 16414 for { 16415 ptr := v.Args[0] 16416 val := v.Args[1] 16417 mem := v.Args[2] 16418 v.reset(OpSelect1) 16419 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem)) 16420 v0.AddArg(val) 16421 v0.AddArg(ptr) 16422 v0.AddArg(mem) 16423 v.AddArg(v0) 16424 return true 16425 } 16426 } 16427 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool { 16428 b := v.Block 16429 _ = b 16430 // match: (AtomicStorePtrNoWB ptr val mem) 16431 // cond: config.PtrSize == 8 16432 // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 16433 for { 16434 ptr := v.Args[0] 16435 val := v.Args[1] 16436 mem := v.Args[2] 16437 if !(config.PtrSize == 8) { 16438 break 16439 } 16440 v.reset(OpSelect1) 16441 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 16442 v0.AddArg(val) 16443 v0.AddArg(ptr) 16444 v0.AddArg(mem) 16445 v.AddArg(v0) 16446 return true 16447 } 16448 // match: (AtomicStorePtrNoWB ptr val mem) 16449 // cond: config.PtrSize == 4 16450 // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem)) 16451 for { 16452 ptr := v.Args[0] 16453 val := v.Args[1] 16454 mem := v.Args[2] 16455 if !(config.PtrSize == 4) { 16456 break 16457 } 16458 v.reset(OpSelect1) 16459 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem)) 16460 v0.AddArg(val) 16461 v0.AddArg(ptr) 16462 v0.AddArg(mem) 16463 v.AddArg(v0) 16464 return true 16465 } 16466 return false 16467 } 16468 func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool { 16469 b := v.Block 16470 _ = b 16471 // match: (Avg64u x y) 16472 // cond: 16473 // result: (AVGQU x y) 16474 for { 16475 x := v.Args[0] 16476 y := v.Args[1] 16477 v.reset(OpAMD64AVGQU) 16478 v.AddArg(x) 16479 v.AddArg(y) 16480 return true 16481 } 16482 } 16483 func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool { 16484 b := v.Block 16485 _ = b 16486 // match: (Bswap32 x) 16487 // cond: 16488 // result: (BSWAPL x) 16489 for { 16490 x := v.Args[0] 16491 v.reset(OpAMD64BSWAPL) 16492 v.AddArg(x) 16493 return true 16494 } 16495 } 16496 func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool { 16497 b := v.Block 16498 _ = b 16499 // match: (Bswap64 x) 16500 // cond: 16501 // result: (BSWAPQ x) 16502 for { 16503 x := v.Args[0] 16504 v.reset(OpAMD64BSWAPQ) 16505 v.AddArg(x) 16506 return true 16507 } 16508 } 16509 func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool { 16510 b := v.Block 16511 _ = b 16512 // match: (ClosureCall [argwid] entry closure mem) 16513 // cond: 16514 // result: (CALLclosure [argwid] entry closure mem) 16515 for { 16516 argwid := v.AuxInt 16517 entry := v.Args[0] 16518 closure := v.Args[1] 16519 mem := v.Args[2] 16520 v.reset(OpAMD64CALLclosure) 16521 v.AuxInt = argwid 16522 v.AddArg(entry) 16523 v.AddArg(closure) 16524 v.AddArg(mem) 16525 return true 16526 } 16527 } 16528 func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool { 16529 b := v.Block 16530 _ = b 16531 // match: (Com16 x) 16532 // cond: 16533 // result: (NOTL x) 16534 for { 16535 x := v.Args[0] 16536 v.reset(OpAMD64NOTL) 16537 v.AddArg(x) 16538 return true 16539 } 16540 } 16541 func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool { 16542 b := v.Block 16543 _ = b 16544 // match: (Com32 x) 16545 // cond: 16546 // result: (NOTL x) 16547 for { 16548 x := v.Args[0] 16549 v.reset(OpAMD64NOTL) 16550 v.AddArg(x) 16551 return true 16552 } 16553 } 16554 func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool { 16555 b := v.Block 16556 _ = b 16557 // match: (Com64 x) 16558 // cond: 16559 // result: (NOTQ x) 16560 for { 16561 x := v.Args[0] 16562 v.reset(OpAMD64NOTQ) 16563 v.AddArg(x) 16564 return true 16565 } 16566 } 16567 func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool { 16568 b := v.Block 16569 _ = b 16570 // match: (Com8 x) 16571 // cond: 16572 // result: (NOTL x) 16573 for { 16574 x := v.Args[0] 16575 v.reset(OpAMD64NOTL) 16576 v.AddArg(x) 16577 return true 16578 } 16579 } 16580 func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool { 16581 b := v.Block 16582 _ = b 16583 // match: (Const16 [val]) 16584 // cond: 16585 // result: (MOVLconst [val]) 16586 for { 16587 val := v.AuxInt 16588 v.reset(OpAMD64MOVLconst) 16589 v.AuxInt = val 16590 return true 16591 } 16592 } 16593 func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool { 16594 b := v.Block 16595 _ = b 16596 // match: (Const32 [val]) 16597 // cond: 16598 // result: (MOVLconst [val]) 16599 for { 16600 val := v.AuxInt 16601 v.reset(OpAMD64MOVLconst) 16602 v.AuxInt = val 16603 return true 16604 } 16605 } 16606 func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool { 16607 b := v.Block 16608 _ = b 16609 // match: (Const32F [val]) 16610 // cond: 16611 // result: (MOVSSconst [val]) 16612 for { 16613 val := v.AuxInt 16614 v.reset(OpAMD64MOVSSconst) 16615 v.AuxInt = val 16616 return true 16617 } 16618 } 16619 func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool { 16620 b := v.Block 16621 _ = b 16622 // match: (Const64 [val]) 16623 // cond: 16624 // result: (MOVQconst [val]) 16625 for { 16626 val := v.AuxInt 16627 v.reset(OpAMD64MOVQconst) 16628 v.AuxInt = val 16629 return true 16630 } 16631 } 16632 func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool { 16633 b := v.Block 16634 _ = b 16635 // match: (Const64F [val]) 16636 // cond: 16637 // result: (MOVSDconst [val]) 16638 for { 16639 val := v.AuxInt 16640 v.reset(OpAMD64MOVSDconst) 16641 v.AuxInt = val 16642 return true 16643 } 16644 } 16645 func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool { 16646 b := v.Block 16647 _ = b 16648 // match: (Const8 [val]) 16649 // cond: 16650 // result: (MOVLconst [val]) 16651 for { 16652 val := v.AuxInt 16653 v.reset(OpAMD64MOVLconst) 16654 v.AuxInt = val 16655 return true 16656 } 16657 } 16658 func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool { 16659 b := v.Block 16660 _ = b 16661 // match: (ConstBool [b]) 16662 // cond: 16663 // result: (MOVLconst [b]) 16664 for { 16665 b := v.AuxInt 16666 v.reset(OpAMD64MOVLconst) 16667 v.AuxInt = b 16668 return true 16669 } 16670 } 16671 func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool { 16672 b := v.Block 16673 _ = b 16674 // match: (ConstNil) 16675 // cond: config.PtrSize == 8 16676 // result: (MOVQconst [0]) 16677 for { 16678 if !(config.PtrSize == 8) { 16679 break 16680 } 16681 v.reset(OpAMD64MOVQconst) 16682 v.AuxInt = 0 16683 return true 16684 } 16685 // match: (ConstNil) 16686 // cond: config.PtrSize == 4 16687 // result: (MOVLconst [0]) 16688 for { 16689 if !(config.PtrSize == 4) { 16690 break 16691 } 16692 v.reset(OpAMD64MOVLconst) 16693 v.AuxInt = 0 16694 return true 16695 } 16696 return false 16697 } 16698 func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool { 16699 b := v.Block 16700 _ = b 16701 // match: (Convert <t> x mem) 16702 // cond: config.PtrSize == 8 16703 // result: (MOVQconvert <t> x mem) 16704 for { 16705 t := v.Type 16706 x := v.Args[0] 16707 mem := v.Args[1] 16708 if !(config.PtrSize == 8) { 16709 break 16710 } 16711 v.reset(OpAMD64MOVQconvert) 16712 v.Type = t 16713 v.AddArg(x) 16714 v.AddArg(mem) 16715 return true 16716 } 16717 // match: (Convert <t> x mem) 16718 // cond: config.PtrSize == 4 16719 // result: (MOVLconvert <t> x mem) 16720 for { 16721 t := v.Type 16722 x := v.Args[0] 16723 mem := v.Args[1] 16724 if !(config.PtrSize == 4) { 16725 break 16726 } 16727 v.reset(OpAMD64MOVLconvert) 16728 v.Type = t 16729 v.AddArg(x) 16730 v.AddArg(mem) 16731 return true 16732 } 16733 return false 16734 } 16735 func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool { 16736 b := v.Block 16737 _ = b 16738 // match: (Ctz32 <t> x) 16739 // cond: 16740 // result: (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x))) 16741 for { 16742 t := v.Type 16743 x := v.Args[0] 16744 v.reset(OpAMD64CMOVLEQ) 16745 v0 := b.NewValue0(v.Pos, OpSelect0, t) 16746 v1 := b.NewValue0(v.Pos, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 16747 v1.AddArg(x) 16748 v0.AddArg(v1) 16749 v.AddArg(v0) 16750 v2 := b.NewValue0(v.Pos, OpAMD64MOVLconst, t) 16751 v2.AuxInt = 32 16752 v.AddArg(v2) 16753 v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags) 16754 v4 := b.NewValue0(v.Pos, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags)) 16755 v4.AddArg(x) 16756 v3.AddArg(v4) 16757 v.AddArg(v3) 16758 return true 16759 } 16760 } 16761 func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool { 16762 b := v.Block 16763 _ = b 16764 // match: (Ctz64 <t> x) 16765 // cond: 16766 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x))) 16767 for { 16768 t := v.Type 16769 x := v.Args[0] 16770 v.reset(OpAMD64CMOVQEQ) 16771 v0 := b.NewValue0(v.Pos, OpSelect0, t) 16772 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 16773 v1.AddArg(x) 16774 v0.AddArg(v1) 16775 v.AddArg(v0) 16776 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 16777 v2.AuxInt = 64 16778 v.AddArg(v2) 16779 v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags) 16780 v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags)) 16781 v4.AddArg(x) 16782 v3.AddArg(v4) 16783 v.AddArg(v3) 16784 return true 16785 } 16786 } 16787 func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool { 16788 b := v.Block 16789 _ = b 16790 // match: (Cvt32Fto32 x) 16791 // cond: 16792 // result: (CVTTSS2SL x) 16793 for { 16794 x := v.Args[0] 16795 v.reset(OpAMD64CVTTSS2SL) 16796 v.AddArg(x) 16797 return true 16798 } 16799 } 16800 func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool { 16801 b := v.Block 16802 _ = b 16803 // match: (Cvt32Fto64 x) 16804 // cond: 16805 // result: (CVTTSS2SQ x) 16806 for { 16807 x := v.Args[0] 16808 v.reset(OpAMD64CVTTSS2SQ) 16809 v.AddArg(x) 16810 return true 16811 } 16812 } 16813 func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool { 16814 b := v.Block 16815 _ = b 16816 // match: (Cvt32Fto64F x) 16817 // cond: 16818 // result: (CVTSS2SD x) 16819 for { 16820 x := v.Args[0] 16821 v.reset(OpAMD64CVTSS2SD) 16822 v.AddArg(x) 16823 return true 16824 } 16825 } 16826 func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool { 16827 b := v.Block 16828 _ = b 16829 // match: (Cvt32to32F x) 16830 // cond: 16831 // result: (CVTSL2SS x) 16832 for { 16833 x := v.Args[0] 16834 v.reset(OpAMD64CVTSL2SS) 16835 v.AddArg(x) 16836 return true 16837 } 16838 } 16839 func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool { 16840 b := v.Block 16841 _ = b 16842 // match: (Cvt32to64F x) 16843 // cond: 16844 // result: (CVTSL2SD x) 16845 for { 16846 x := v.Args[0] 16847 v.reset(OpAMD64CVTSL2SD) 16848 v.AddArg(x) 16849 return true 16850 } 16851 } 16852 func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool { 16853 b := v.Block 16854 _ = b 16855 // match: (Cvt64Fto32 x) 16856 // cond: 16857 // result: (CVTTSD2SL x) 16858 for { 16859 x := v.Args[0] 16860 v.reset(OpAMD64CVTTSD2SL) 16861 v.AddArg(x) 16862 return true 16863 } 16864 } 16865 func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool { 16866 b := v.Block 16867 _ = b 16868 // match: (Cvt64Fto32F x) 16869 // cond: 16870 // result: (CVTSD2SS x) 16871 for { 16872 x := v.Args[0] 16873 v.reset(OpAMD64CVTSD2SS) 16874 v.AddArg(x) 16875 return true 16876 } 16877 } 16878 func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool { 16879 b := v.Block 16880 _ = b 16881 // match: (Cvt64Fto64 x) 16882 // cond: 16883 // result: (CVTTSD2SQ x) 16884 for { 16885 x := v.Args[0] 16886 v.reset(OpAMD64CVTTSD2SQ) 16887 v.AddArg(x) 16888 return true 16889 } 16890 } 16891 func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool { 16892 b := v.Block 16893 _ = b 16894 // match: (Cvt64to32F x) 16895 // cond: 16896 // result: (CVTSQ2SS x) 16897 for { 16898 x := v.Args[0] 16899 v.reset(OpAMD64CVTSQ2SS) 16900 v.AddArg(x) 16901 return true 16902 } 16903 } 16904 func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool { 16905 b := v.Block 16906 _ = b 16907 // match: (Cvt64to64F x) 16908 // cond: 16909 // result: (CVTSQ2SD x) 16910 for { 16911 x := v.Args[0] 16912 v.reset(OpAMD64CVTSQ2SD) 16913 v.AddArg(x) 16914 return true 16915 } 16916 } 16917 func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool { 16918 b := v.Block 16919 _ = b 16920 // match: (DeferCall [argwid] mem) 16921 // cond: 16922 // result: (CALLdefer [argwid] mem) 16923 for { 16924 argwid := v.AuxInt 16925 mem := v.Args[0] 16926 v.reset(OpAMD64CALLdefer) 16927 v.AuxInt = argwid 16928 v.AddArg(mem) 16929 return true 16930 } 16931 } 16932 func rewriteValueAMD64_OpDiv128u(v *Value, config *Config) bool { 16933 b := v.Block 16934 _ = b 16935 // match: (Div128u xhi xlo y) 16936 // cond: 16937 // result: (DIVQU2 xhi xlo y) 16938 for { 16939 xhi := v.Args[0] 16940 xlo := v.Args[1] 16941 y := v.Args[2] 16942 v.reset(OpAMD64DIVQU2) 16943 v.AddArg(xhi) 16944 v.AddArg(xlo) 16945 v.AddArg(y) 16946 return true 16947 } 16948 } 16949 func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool { 16950 b := v.Block 16951 _ = b 16952 // match: (Div16 x y) 16953 // cond: 16954 // result: (Select0 (DIVW x y)) 16955 for { 16956 x := v.Args[0] 16957 y := v.Args[1] 16958 v.reset(OpSelect0) 16959 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 16960 v0.AddArg(x) 16961 v0.AddArg(y) 16962 v.AddArg(v0) 16963 return true 16964 } 16965 } 16966 func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool { 16967 b := v.Block 16968 _ = b 16969 // match: (Div16u x y) 16970 // cond: 16971 // result: (Select0 (DIVWU x y)) 16972 for { 16973 x := v.Args[0] 16974 y := v.Args[1] 16975 v.reset(OpSelect0) 16976 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 16977 v0.AddArg(x) 16978 v0.AddArg(y) 16979 v.AddArg(v0) 16980 return true 16981 } 16982 } 16983 func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool { 16984 b := v.Block 16985 _ = b 16986 // match: (Div32 x y) 16987 // cond: 16988 // result: (Select0 (DIVL x y)) 16989 for { 16990 x := v.Args[0] 16991 y := v.Args[1] 16992 v.reset(OpSelect0) 16993 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 16994 v0.AddArg(x) 16995 v0.AddArg(y) 16996 v.AddArg(v0) 16997 return true 16998 } 16999 } 17000 func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool { 17001 b := v.Block 17002 _ = b 17003 // match: (Div32F x y) 17004 // cond: 17005 // result: (DIVSS x y) 17006 for { 17007 x := v.Args[0] 17008 y := v.Args[1] 17009 v.reset(OpAMD64DIVSS) 17010 v.AddArg(x) 17011 v.AddArg(y) 17012 return true 17013 } 17014 } 17015 func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool { 17016 b := v.Block 17017 _ = b 17018 // match: (Div32u x y) 17019 // cond: 17020 // result: (Select0 (DIVLU x y)) 17021 for { 17022 x := v.Args[0] 17023 y := v.Args[1] 17024 v.reset(OpSelect0) 17025 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 17026 v0.AddArg(x) 17027 v0.AddArg(y) 17028 v.AddArg(v0) 17029 return true 17030 } 17031 } 17032 func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool { 17033 b := v.Block 17034 _ = b 17035 // match: (Div64 x y) 17036 // cond: 17037 // result: (Select0 (DIVQ x y)) 17038 for { 17039 x := v.Args[0] 17040 y := v.Args[1] 17041 v.reset(OpSelect0) 17042 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 17043 v0.AddArg(x) 17044 v0.AddArg(y) 17045 v.AddArg(v0) 17046 return true 17047 } 17048 } 17049 func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool { 17050 b := v.Block 17051 _ = b 17052 // match: (Div64F x y) 17053 // cond: 17054 // result: (DIVSD x y) 17055 for { 17056 x := v.Args[0] 17057 y := v.Args[1] 17058 v.reset(OpAMD64DIVSD) 17059 v.AddArg(x) 17060 v.AddArg(y) 17061 return true 17062 } 17063 } 17064 func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool { 17065 b := v.Block 17066 _ = b 17067 // match: (Div64u x y) 17068 // cond: 17069 // result: (Select0 (DIVQU x y)) 17070 for { 17071 x := v.Args[0] 17072 y := v.Args[1] 17073 v.reset(OpSelect0) 17074 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 17075 v0.AddArg(x) 17076 v0.AddArg(y) 17077 v.AddArg(v0) 17078 return true 17079 } 17080 } 17081 func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool { 17082 b := v.Block 17083 _ = b 17084 // match: (Div8 x y) 17085 // cond: 17086 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 17087 for { 17088 x := v.Args[0] 17089 y := v.Args[1] 17090 v.reset(OpSelect0) 17091 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 17092 v1 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16()) 17093 v1.AddArg(x) 17094 v0.AddArg(v1) 17095 v2 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16()) 17096 v2.AddArg(y) 17097 v0.AddArg(v2) 17098 v.AddArg(v0) 17099 return true 17100 } 17101 } 17102 func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool { 17103 b := v.Block 17104 _ = b 17105 // match: (Div8u x y) 17106 // cond: 17107 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 17108 for { 17109 x := v.Args[0] 17110 y := v.Args[1] 17111 v.reset(OpSelect0) 17112 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 17113 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16()) 17114 v1.AddArg(x) 17115 v0.AddArg(v1) 17116 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16()) 17117 v2.AddArg(y) 17118 v0.AddArg(v2) 17119 v.AddArg(v0) 17120 return true 17121 } 17122 } 17123 func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool { 17124 b := v.Block 17125 _ = b 17126 // match: (Eq16 x y) 17127 // cond: 17128 // result: (SETEQ (CMPW x y)) 17129 for { 17130 x := v.Args[0] 17131 y := v.Args[1] 17132 v.reset(OpAMD64SETEQ) 17133 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17134 v0.AddArg(x) 17135 v0.AddArg(y) 17136 v.AddArg(v0) 17137 return true 17138 } 17139 } 17140 func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool { 17141 b := v.Block 17142 _ = b 17143 // match: (Eq32 x y) 17144 // cond: 17145 // result: (SETEQ (CMPL x y)) 17146 for { 17147 x := v.Args[0] 17148 y := v.Args[1] 17149 v.reset(OpAMD64SETEQ) 17150 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17151 v0.AddArg(x) 17152 v0.AddArg(y) 17153 v.AddArg(v0) 17154 return true 17155 } 17156 } 17157 func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool { 17158 b := v.Block 17159 _ = b 17160 // match: (Eq32F x y) 17161 // cond: 17162 // result: (SETEQF (UCOMISS x y)) 17163 for { 17164 x := v.Args[0] 17165 y := v.Args[1] 17166 v.reset(OpAMD64SETEQF) 17167 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 17168 v0.AddArg(x) 17169 v0.AddArg(y) 17170 v.AddArg(v0) 17171 return true 17172 } 17173 } 17174 func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool { 17175 b := v.Block 17176 _ = b 17177 // match: (Eq64 x y) 17178 // cond: 17179 // result: (SETEQ (CMPQ x y)) 17180 for { 17181 x := v.Args[0] 17182 y := v.Args[1] 17183 v.reset(OpAMD64SETEQ) 17184 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17185 v0.AddArg(x) 17186 v0.AddArg(y) 17187 v.AddArg(v0) 17188 return true 17189 } 17190 } 17191 func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool { 17192 b := v.Block 17193 _ = b 17194 // match: (Eq64F x y) 17195 // cond: 17196 // result: (SETEQF (UCOMISD x y)) 17197 for { 17198 x := v.Args[0] 17199 y := v.Args[1] 17200 v.reset(OpAMD64SETEQF) 17201 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 17202 v0.AddArg(x) 17203 v0.AddArg(y) 17204 v.AddArg(v0) 17205 return true 17206 } 17207 } 17208 func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool { 17209 b := v.Block 17210 _ = b 17211 // match: (Eq8 x y) 17212 // cond: 17213 // result: (SETEQ (CMPB x y)) 17214 for { 17215 x := v.Args[0] 17216 y := v.Args[1] 17217 v.reset(OpAMD64SETEQ) 17218 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17219 v0.AddArg(x) 17220 v0.AddArg(y) 17221 v.AddArg(v0) 17222 return true 17223 } 17224 } 17225 func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool { 17226 b := v.Block 17227 _ = b 17228 // match: (EqB x y) 17229 // cond: 17230 // result: (SETEQ (CMPB x y)) 17231 for { 17232 x := v.Args[0] 17233 y := v.Args[1] 17234 v.reset(OpAMD64SETEQ) 17235 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17236 v0.AddArg(x) 17237 v0.AddArg(y) 17238 v.AddArg(v0) 17239 return true 17240 } 17241 } 17242 func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool { 17243 b := v.Block 17244 _ = b 17245 // match: (EqPtr x y) 17246 // cond: config.PtrSize == 8 17247 // result: (SETEQ (CMPQ x y)) 17248 for { 17249 x := v.Args[0] 17250 y := v.Args[1] 17251 if !(config.PtrSize == 8) { 17252 break 17253 } 17254 v.reset(OpAMD64SETEQ) 17255 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17256 v0.AddArg(x) 17257 v0.AddArg(y) 17258 v.AddArg(v0) 17259 return true 17260 } 17261 // match: (EqPtr x y) 17262 // cond: config.PtrSize == 4 17263 // result: (SETEQ (CMPL x y)) 17264 for { 17265 x := v.Args[0] 17266 y := v.Args[1] 17267 if !(config.PtrSize == 4) { 17268 break 17269 } 17270 v.reset(OpAMD64SETEQ) 17271 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17272 v0.AddArg(x) 17273 v0.AddArg(y) 17274 v.AddArg(v0) 17275 return true 17276 } 17277 return false 17278 } 17279 func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool { 17280 b := v.Block 17281 _ = b 17282 // match: (Geq16 x y) 17283 // cond: 17284 // result: (SETGE (CMPW x y)) 17285 for { 17286 x := v.Args[0] 17287 y := v.Args[1] 17288 v.reset(OpAMD64SETGE) 17289 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17290 v0.AddArg(x) 17291 v0.AddArg(y) 17292 v.AddArg(v0) 17293 return true 17294 } 17295 } 17296 func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool { 17297 b := v.Block 17298 _ = b 17299 // match: (Geq16U x y) 17300 // cond: 17301 // result: (SETAE (CMPW x y)) 17302 for { 17303 x := v.Args[0] 17304 y := v.Args[1] 17305 v.reset(OpAMD64SETAE) 17306 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17307 v0.AddArg(x) 17308 v0.AddArg(y) 17309 v.AddArg(v0) 17310 return true 17311 } 17312 } 17313 func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool { 17314 b := v.Block 17315 _ = b 17316 // match: (Geq32 x y) 17317 // cond: 17318 // result: (SETGE (CMPL x y)) 17319 for { 17320 x := v.Args[0] 17321 y := v.Args[1] 17322 v.reset(OpAMD64SETGE) 17323 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17324 v0.AddArg(x) 17325 v0.AddArg(y) 17326 v.AddArg(v0) 17327 return true 17328 } 17329 } 17330 func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool { 17331 b := v.Block 17332 _ = b 17333 // match: (Geq32F x y) 17334 // cond: 17335 // result: (SETGEF (UCOMISS x y)) 17336 for { 17337 x := v.Args[0] 17338 y := v.Args[1] 17339 v.reset(OpAMD64SETGEF) 17340 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 17341 v0.AddArg(x) 17342 v0.AddArg(y) 17343 v.AddArg(v0) 17344 return true 17345 } 17346 } 17347 func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool { 17348 b := v.Block 17349 _ = b 17350 // match: (Geq32U x y) 17351 // cond: 17352 // result: (SETAE (CMPL x y)) 17353 for { 17354 x := v.Args[0] 17355 y := v.Args[1] 17356 v.reset(OpAMD64SETAE) 17357 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17358 v0.AddArg(x) 17359 v0.AddArg(y) 17360 v.AddArg(v0) 17361 return true 17362 } 17363 } 17364 func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool { 17365 b := v.Block 17366 _ = b 17367 // match: (Geq64 x y) 17368 // cond: 17369 // result: (SETGE (CMPQ x y)) 17370 for { 17371 x := v.Args[0] 17372 y := v.Args[1] 17373 v.reset(OpAMD64SETGE) 17374 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17375 v0.AddArg(x) 17376 v0.AddArg(y) 17377 v.AddArg(v0) 17378 return true 17379 } 17380 } 17381 func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool { 17382 b := v.Block 17383 _ = b 17384 // match: (Geq64F x y) 17385 // cond: 17386 // result: (SETGEF (UCOMISD x y)) 17387 for { 17388 x := v.Args[0] 17389 y := v.Args[1] 17390 v.reset(OpAMD64SETGEF) 17391 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 17392 v0.AddArg(x) 17393 v0.AddArg(y) 17394 v.AddArg(v0) 17395 return true 17396 } 17397 } 17398 func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool { 17399 b := v.Block 17400 _ = b 17401 // match: (Geq64U x y) 17402 // cond: 17403 // result: (SETAE (CMPQ x y)) 17404 for { 17405 x := v.Args[0] 17406 y := v.Args[1] 17407 v.reset(OpAMD64SETAE) 17408 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17409 v0.AddArg(x) 17410 v0.AddArg(y) 17411 v.AddArg(v0) 17412 return true 17413 } 17414 } 17415 func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool { 17416 b := v.Block 17417 _ = b 17418 // match: (Geq8 x y) 17419 // cond: 17420 // result: (SETGE (CMPB x y)) 17421 for { 17422 x := v.Args[0] 17423 y := v.Args[1] 17424 v.reset(OpAMD64SETGE) 17425 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17426 v0.AddArg(x) 17427 v0.AddArg(y) 17428 v.AddArg(v0) 17429 return true 17430 } 17431 } 17432 func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool { 17433 b := v.Block 17434 _ = b 17435 // match: (Geq8U x y) 17436 // cond: 17437 // result: (SETAE (CMPB x y)) 17438 for { 17439 x := v.Args[0] 17440 y := v.Args[1] 17441 v.reset(OpAMD64SETAE) 17442 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17443 v0.AddArg(x) 17444 v0.AddArg(y) 17445 v.AddArg(v0) 17446 return true 17447 } 17448 } 17449 func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool { 17450 b := v.Block 17451 _ = b 17452 // match: (GetClosurePtr) 17453 // cond: 17454 // result: (LoweredGetClosurePtr) 17455 for { 17456 v.reset(OpAMD64LoweredGetClosurePtr) 17457 return true 17458 } 17459 } 17460 func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool { 17461 b := v.Block 17462 _ = b 17463 // match: (GetG mem) 17464 // cond: 17465 // result: (LoweredGetG mem) 17466 for { 17467 mem := v.Args[0] 17468 v.reset(OpAMD64LoweredGetG) 17469 v.AddArg(mem) 17470 return true 17471 } 17472 } 17473 func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool { 17474 b := v.Block 17475 _ = b 17476 // match: (GoCall [argwid] mem) 17477 // cond: 17478 // result: (CALLgo [argwid] mem) 17479 for { 17480 argwid := v.AuxInt 17481 mem := v.Args[0] 17482 v.reset(OpAMD64CALLgo) 17483 v.AuxInt = argwid 17484 v.AddArg(mem) 17485 return true 17486 } 17487 } 17488 func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool { 17489 b := v.Block 17490 _ = b 17491 // match: (Greater16 x y) 17492 // cond: 17493 // result: (SETG (CMPW x y)) 17494 for { 17495 x := v.Args[0] 17496 y := v.Args[1] 17497 v.reset(OpAMD64SETG) 17498 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17499 v0.AddArg(x) 17500 v0.AddArg(y) 17501 v.AddArg(v0) 17502 return true 17503 } 17504 } 17505 func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool { 17506 b := v.Block 17507 _ = b 17508 // match: (Greater16U x y) 17509 // cond: 17510 // result: (SETA (CMPW x y)) 17511 for { 17512 x := v.Args[0] 17513 y := v.Args[1] 17514 v.reset(OpAMD64SETA) 17515 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17516 v0.AddArg(x) 17517 v0.AddArg(y) 17518 v.AddArg(v0) 17519 return true 17520 } 17521 } 17522 func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool { 17523 b := v.Block 17524 _ = b 17525 // match: (Greater32 x y) 17526 // cond: 17527 // result: (SETG (CMPL x y)) 17528 for { 17529 x := v.Args[0] 17530 y := v.Args[1] 17531 v.reset(OpAMD64SETG) 17532 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17533 v0.AddArg(x) 17534 v0.AddArg(y) 17535 v.AddArg(v0) 17536 return true 17537 } 17538 } 17539 func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool { 17540 b := v.Block 17541 _ = b 17542 // match: (Greater32F x y) 17543 // cond: 17544 // result: (SETGF (UCOMISS x y)) 17545 for { 17546 x := v.Args[0] 17547 y := v.Args[1] 17548 v.reset(OpAMD64SETGF) 17549 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 17550 v0.AddArg(x) 17551 v0.AddArg(y) 17552 v.AddArg(v0) 17553 return true 17554 } 17555 } 17556 func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool { 17557 b := v.Block 17558 _ = b 17559 // match: (Greater32U x y) 17560 // cond: 17561 // result: (SETA (CMPL x y)) 17562 for { 17563 x := v.Args[0] 17564 y := v.Args[1] 17565 v.reset(OpAMD64SETA) 17566 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17567 v0.AddArg(x) 17568 v0.AddArg(y) 17569 v.AddArg(v0) 17570 return true 17571 } 17572 } 17573 func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool { 17574 b := v.Block 17575 _ = b 17576 // match: (Greater64 x y) 17577 // cond: 17578 // result: (SETG (CMPQ x y)) 17579 for { 17580 x := v.Args[0] 17581 y := v.Args[1] 17582 v.reset(OpAMD64SETG) 17583 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17584 v0.AddArg(x) 17585 v0.AddArg(y) 17586 v.AddArg(v0) 17587 return true 17588 } 17589 } 17590 func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool { 17591 b := v.Block 17592 _ = b 17593 // match: (Greater64F x y) 17594 // cond: 17595 // result: (SETGF (UCOMISD x y)) 17596 for { 17597 x := v.Args[0] 17598 y := v.Args[1] 17599 v.reset(OpAMD64SETGF) 17600 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 17601 v0.AddArg(x) 17602 v0.AddArg(y) 17603 v.AddArg(v0) 17604 return true 17605 } 17606 } 17607 func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool { 17608 b := v.Block 17609 _ = b 17610 // match: (Greater64U x y) 17611 // cond: 17612 // result: (SETA (CMPQ x y)) 17613 for { 17614 x := v.Args[0] 17615 y := v.Args[1] 17616 v.reset(OpAMD64SETA) 17617 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17618 v0.AddArg(x) 17619 v0.AddArg(y) 17620 v.AddArg(v0) 17621 return true 17622 } 17623 } 17624 func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool { 17625 b := v.Block 17626 _ = b 17627 // match: (Greater8 x y) 17628 // cond: 17629 // result: (SETG (CMPB x y)) 17630 for { 17631 x := v.Args[0] 17632 y := v.Args[1] 17633 v.reset(OpAMD64SETG) 17634 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17635 v0.AddArg(x) 17636 v0.AddArg(y) 17637 v.AddArg(v0) 17638 return true 17639 } 17640 } 17641 func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool { 17642 b := v.Block 17643 _ = b 17644 // match: (Greater8U x y) 17645 // cond: 17646 // result: (SETA (CMPB x y)) 17647 for { 17648 x := v.Args[0] 17649 y := v.Args[1] 17650 v.reset(OpAMD64SETA) 17651 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 17652 v0.AddArg(x) 17653 v0.AddArg(y) 17654 v.AddArg(v0) 17655 return true 17656 } 17657 } 17658 func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool { 17659 b := v.Block 17660 _ = b 17661 // match: (Hmul16 x y) 17662 // cond: 17663 // result: (HMULW x y) 17664 for { 17665 x := v.Args[0] 17666 y := v.Args[1] 17667 v.reset(OpAMD64HMULW) 17668 v.AddArg(x) 17669 v.AddArg(y) 17670 return true 17671 } 17672 } 17673 func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool { 17674 b := v.Block 17675 _ = b 17676 // match: (Hmul16u x y) 17677 // cond: 17678 // result: (HMULWU x y) 17679 for { 17680 x := v.Args[0] 17681 y := v.Args[1] 17682 v.reset(OpAMD64HMULWU) 17683 v.AddArg(x) 17684 v.AddArg(y) 17685 return true 17686 } 17687 } 17688 func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool { 17689 b := v.Block 17690 _ = b 17691 // match: (Hmul32 x y) 17692 // cond: 17693 // result: (HMULL x y) 17694 for { 17695 x := v.Args[0] 17696 y := v.Args[1] 17697 v.reset(OpAMD64HMULL) 17698 v.AddArg(x) 17699 v.AddArg(y) 17700 return true 17701 } 17702 } 17703 func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool { 17704 b := v.Block 17705 _ = b 17706 // match: (Hmul32u x y) 17707 // cond: 17708 // result: (HMULLU x y) 17709 for { 17710 x := v.Args[0] 17711 y := v.Args[1] 17712 v.reset(OpAMD64HMULLU) 17713 v.AddArg(x) 17714 v.AddArg(y) 17715 return true 17716 } 17717 } 17718 func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool { 17719 b := v.Block 17720 _ = b 17721 // match: (Hmul64 x y) 17722 // cond: 17723 // result: (HMULQ x y) 17724 for { 17725 x := v.Args[0] 17726 y := v.Args[1] 17727 v.reset(OpAMD64HMULQ) 17728 v.AddArg(x) 17729 v.AddArg(y) 17730 return true 17731 } 17732 } 17733 func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool { 17734 b := v.Block 17735 _ = b 17736 // match: (Hmul64u x y) 17737 // cond: 17738 // result: (HMULQU x y) 17739 for { 17740 x := v.Args[0] 17741 y := v.Args[1] 17742 v.reset(OpAMD64HMULQU) 17743 v.AddArg(x) 17744 v.AddArg(y) 17745 return true 17746 } 17747 } 17748 func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool { 17749 b := v.Block 17750 _ = b 17751 // match: (Hmul8 x y) 17752 // cond: 17753 // result: (HMULB x y) 17754 for { 17755 x := v.Args[0] 17756 y := v.Args[1] 17757 v.reset(OpAMD64HMULB) 17758 v.AddArg(x) 17759 v.AddArg(y) 17760 return true 17761 } 17762 } 17763 func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool { 17764 b := v.Block 17765 _ = b 17766 // match: (Hmul8u x y) 17767 // cond: 17768 // result: (HMULBU x y) 17769 for { 17770 x := v.Args[0] 17771 y := v.Args[1] 17772 v.reset(OpAMD64HMULBU) 17773 v.AddArg(x) 17774 v.AddArg(y) 17775 return true 17776 } 17777 } 17778 func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool { 17779 b := v.Block 17780 _ = b 17781 // match: (Int64Hi x) 17782 // cond: 17783 // result: (SHRQconst [32] x) 17784 for { 17785 x := v.Args[0] 17786 v.reset(OpAMD64SHRQconst) 17787 v.AuxInt = 32 17788 v.AddArg(x) 17789 return true 17790 } 17791 } 17792 func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool { 17793 b := v.Block 17794 _ = b 17795 // match: (InterCall [argwid] entry mem) 17796 // cond: 17797 // result: (CALLinter [argwid] entry mem) 17798 for { 17799 argwid := v.AuxInt 17800 entry := v.Args[0] 17801 mem := v.Args[1] 17802 v.reset(OpAMD64CALLinter) 17803 v.AuxInt = argwid 17804 v.AddArg(entry) 17805 v.AddArg(mem) 17806 return true 17807 } 17808 } 17809 func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool { 17810 b := v.Block 17811 _ = b 17812 // match: (IsInBounds idx len) 17813 // cond: 17814 // result: (SETB (CMPQ idx len)) 17815 for { 17816 idx := v.Args[0] 17817 len := v.Args[1] 17818 v.reset(OpAMD64SETB) 17819 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17820 v0.AddArg(idx) 17821 v0.AddArg(len) 17822 v.AddArg(v0) 17823 return true 17824 } 17825 } 17826 func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool { 17827 b := v.Block 17828 _ = b 17829 // match: (IsNonNil p) 17830 // cond: config.PtrSize == 8 17831 // result: (SETNE (TESTQ p p)) 17832 for { 17833 p := v.Args[0] 17834 if !(config.PtrSize == 8) { 17835 break 17836 } 17837 v.reset(OpAMD64SETNE) 17838 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, TypeFlags) 17839 v0.AddArg(p) 17840 v0.AddArg(p) 17841 v.AddArg(v0) 17842 return true 17843 } 17844 // match: (IsNonNil p) 17845 // cond: config.PtrSize == 4 17846 // result: (SETNE (TESTL p p)) 17847 for { 17848 p := v.Args[0] 17849 if !(config.PtrSize == 4) { 17850 break 17851 } 17852 v.reset(OpAMD64SETNE) 17853 v0 := b.NewValue0(v.Pos, OpAMD64TESTL, TypeFlags) 17854 v0.AddArg(p) 17855 v0.AddArg(p) 17856 v.AddArg(v0) 17857 return true 17858 } 17859 return false 17860 } 17861 func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool { 17862 b := v.Block 17863 _ = b 17864 // match: (IsSliceInBounds idx len) 17865 // cond: 17866 // result: (SETBE (CMPQ idx len)) 17867 for { 17868 idx := v.Args[0] 17869 len := v.Args[1] 17870 v.reset(OpAMD64SETBE) 17871 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17872 v0.AddArg(idx) 17873 v0.AddArg(len) 17874 v.AddArg(v0) 17875 return true 17876 } 17877 } 17878 func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool { 17879 b := v.Block 17880 _ = b 17881 // match: (Leq16 x y) 17882 // cond: 17883 // result: (SETLE (CMPW x y)) 17884 for { 17885 x := v.Args[0] 17886 y := v.Args[1] 17887 v.reset(OpAMD64SETLE) 17888 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17889 v0.AddArg(x) 17890 v0.AddArg(y) 17891 v.AddArg(v0) 17892 return true 17893 } 17894 } 17895 func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool { 17896 b := v.Block 17897 _ = b 17898 // match: (Leq16U x y) 17899 // cond: 17900 // result: (SETBE (CMPW x y)) 17901 for { 17902 x := v.Args[0] 17903 y := v.Args[1] 17904 v.reset(OpAMD64SETBE) 17905 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 17906 v0.AddArg(x) 17907 v0.AddArg(y) 17908 v.AddArg(v0) 17909 return true 17910 } 17911 } 17912 func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool { 17913 b := v.Block 17914 _ = b 17915 // match: (Leq32 x y) 17916 // cond: 17917 // result: (SETLE (CMPL x y)) 17918 for { 17919 x := v.Args[0] 17920 y := v.Args[1] 17921 v.reset(OpAMD64SETLE) 17922 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17923 v0.AddArg(x) 17924 v0.AddArg(y) 17925 v.AddArg(v0) 17926 return true 17927 } 17928 } 17929 func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool { 17930 b := v.Block 17931 _ = b 17932 // match: (Leq32F x y) 17933 // cond: 17934 // result: (SETGEF (UCOMISS y x)) 17935 for { 17936 x := v.Args[0] 17937 y := v.Args[1] 17938 v.reset(OpAMD64SETGEF) 17939 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 17940 v0.AddArg(y) 17941 v0.AddArg(x) 17942 v.AddArg(v0) 17943 return true 17944 } 17945 } 17946 func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool { 17947 b := v.Block 17948 _ = b 17949 // match: (Leq32U x y) 17950 // cond: 17951 // result: (SETBE (CMPL x y)) 17952 for { 17953 x := v.Args[0] 17954 y := v.Args[1] 17955 v.reset(OpAMD64SETBE) 17956 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 17957 v0.AddArg(x) 17958 v0.AddArg(y) 17959 v.AddArg(v0) 17960 return true 17961 } 17962 } 17963 func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool { 17964 b := v.Block 17965 _ = b 17966 // match: (Leq64 x y) 17967 // cond: 17968 // result: (SETLE (CMPQ x y)) 17969 for { 17970 x := v.Args[0] 17971 y := v.Args[1] 17972 v.reset(OpAMD64SETLE) 17973 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 17974 v0.AddArg(x) 17975 v0.AddArg(y) 17976 v.AddArg(v0) 17977 return true 17978 } 17979 } 17980 func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool { 17981 b := v.Block 17982 _ = b 17983 // match: (Leq64F x y) 17984 // cond: 17985 // result: (SETGEF (UCOMISD y x)) 17986 for { 17987 x := v.Args[0] 17988 y := v.Args[1] 17989 v.reset(OpAMD64SETGEF) 17990 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 17991 v0.AddArg(y) 17992 v0.AddArg(x) 17993 v.AddArg(v0) 17994 return true 17995 } 17996 } 17997 func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool { 17998 b := v.Block 17999 _ = b 18000 // match: (Leq64U x y) 18001 // cond: 18002 // result: (SETBE (CMPQ x y)) 18003 for { 18004 x := v.Args[0] 18005 y := v.Args[1] 18006 v.reset(OpAMD64SETBE) 18007 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 18008 v0.AddArg(x) 18009 v0.AddArg(y) 18010 v.AddArg(v0) 18011 return true 18012 } 18013 } 18014 func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool { 18015 b := v.Block 18016 _ = b 18017 // match: (Leq8 x y) 18018 // cond: 18019 // result: (SETLE (CMPB x y)) 18020 for { 18021 x := v.Args[0] 18022 y := v.Args[1] 18023 v.reset(OpAMD64SETLE) 18024 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 18025 v0.AddArg(x) 18026 v0.AddArg(y) 18027 v.AddArg(v0) 18028 return true 18029 } 18030 } 18031 func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool { 18032 b := v.Block 18033 _ = b 18034 // match: (Leq8U x y) 18035 // cond: 18036 // result: (SETBE (CMPB x y)) 18037 for { 18038 x := v.Args[0] 18039 y := v.Args[1] 18040 v.reset(OpAMD64SETBE) 18041 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 18042 v0.AddArg(x) 18043 v0.AddArg(y) 18044 v.AddArg(v0) 18045 return true 18046 } 18047 } 18048 func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool { 18049 b := v.Block 18050 _ = b 18051 // match: (Less16 x y) 18052 // cond: 18053 // result: (SETL (CMPW x y)) 18054 for { 18055 x := v.Args[0] 18056 y := v.Args[1] 18057 v.reset(OpAMD64SETL) 18058 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 18059 v0.AddArg(x) 18060 v0.AddArg(y) 18061 v.AddArg(v0) 18062 return true 18063 } 18064 } 18065 func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool { 18066 b := v.Block 18067 _ = b 18068 // match: (Less16U x y) 18069 // cond: 18070 // result: (SETB (CMPW x y)) 18071 for { 18072 x := v.Args[0] 18073 y := v.Args[1] 18074 v.reset(OpAMD64SETB) 18075 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 18076 v0.AddArg(x) 18077 v0.AddArg(y) 18078 v.AddArg(v0) 18079 return true 18080 } 18081 } 18082 func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool { 18083 b := v.Block 18084 _ = b 18085 // match: (Less32 x y) 18086 // cond: 18087 // result: (SETL (CMPL x y)) 18088 for { 18089 x := v.Args[0] 18090 y := v.Args[1] 18091 v.reset(OpAMD64SETL) 18092 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 18093 v0.AddArg(x) 18094 v0.AddArg(y) 18095 v.AddArg(v0) 18096 return true 18097 } 18098 } 18099 func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool { 18100 b := v.Block 18101 _ = b 18102 // match: (Less32F x y) 18103 // cond: 18104 // result: (SETGF (UCOMISS y x)) 18105 for { 18106 x := v.Args[0] 18107 y := v.Args[1] 18108 v.reset(OpAMD64SETGF) 18109 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 18110 v0.AddArg(y) 18111 v0.AddArg(x) 18112 v.AddArg(v0) 18113 return true 18114 } 18115 } 18116 func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool { 18117 b := v.Block 18118 _ = b 18119 // match: (Less32U x y) 18120 // cond: 18121 // result: (SETB (CMPL x y)) 18122 for { 18123 x := v.Args[0] 18124 y := v.Args[1] 18125 v.reset(OpAMD64SETB) 18126 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 18127 v0.AddArg(x) 18128 v0.AddArg(y) 18129 v.AddArg(v0) 18130 return true 18131 } 18132 } 18133 func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool { 18134 b := v.Block 18135 _ = b 18136 // match: (Less64 x y) 18137 // cond: 18138 // result: (SETL (CMPQ x y)) 18139 for { 18140 x := v.Args[0] 18141 y := v.Args[1] 18142 v.reset(OpAMD64SETL) 18143 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 18144 v0.AddArg(x) 18145 v0.AddArg(y) 18146 v.AddArg(v0) 18147 return true 18148 } 18149 } 18150 func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool { 18151 b := v.Block 18152 _ = b 18153 // match: (Less64F x y) 18154 // cond: 18155 // result: (SETGF (UCOMISD y x)) 18156 for { 18157 x := v.Args[0] 18158 y := v.Args[1] 18159 v.reset(OpAMD64SETGF) 18160 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 18161 v0.AddArg(y) 18162 v0.AddArg(x) 18163 v.AddArg(v0) 18164 return true 18165 } 18166 } 18167 func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool { 18168 b := v.Block 18169 _ = b 18170 // match: (Less64U x y) 18171 // cond: 18172 // result: (SETB (CMPQ x y)) 18173 for { 18174 x := v.Args[0] 18175 y := v.Args[1] 18176 v.reset(OpAMD64SETB) 18177 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 18178 v0.AddArg(x) 18179 v0.AddArg(y) 18180 v.AddArg(v0) 18181 return true 18182 } 18183 } 18184 func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool { 18185 b := v.Block 18186 _ = b 18187 // match: (Less8 x y) 18188 // cond: 18189 // result: (SETL (CMPB x y)) 18190 for { 18191 x := v.Args[0] 18192 y := v.Args[1] 18193 v.reset(OpAMD64SETL) 18194 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 18195 v0.AddArg(x) 18196 v0.AddArg(y) 18197 v.AddArg(v0) 18198 return true 18199 } 18200 } 18201 func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool { 18202 b := v.Block 18203 _ = b 18204 // match: (Less8U x y) 18205 // cond: 18206 // result: (SETB (CMPB x y)) 18207 for { 18208 x := v.Args[0] 18209 y := v.Args[1] 18210 v.reset(OpAMD64SETB) 18211 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 18212 v0.AddArg(x) 18213 v0.AddArg(y) 18214 v.AddArg(v0) 18215 return true 18216 } 18217 } 18218 func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool { 18219 b := v.Block 18220 _ = b 18221 // match: (Load <t> ptr mem) 18222 // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) 18223 // result: (MOVQload ptr mem) 18224 for { 18225 t := v.Type 18226 ptr := v.Args[0] 18227 mem := v.Args[1] 18228 if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) { 18229 break 18230 } 18231 v.reset(OpAMD64MOVQload) 18232 v.AddArg(ptr) 18233 v.AddArg(mem) 18234 return true 18235 } 18236 // match: (Load <t> ptr mem) 18237 // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) 18238 // result: (MOVLload ptr mem) 18239 for { 18240 t := v.Type 18241 ptr := v.Args[0] 18242 mem := v.Args[1] 18243 if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) { 18244 break 18245 } 18246 v.reset(OpAMD64MOVLload) 18247 v.AddArg(ptr) 18248 v.AddArg(mem) 18249 return true 18250 } 18251 // match: (Load <t> ptr mem) 18252 // cond: is16BitInt(t) 18253 // result: (MOVWload ptr mem) 18254 for { 18255 t := v.Type 18256 ptr := v.Args[0] 18257 mem := v.Args[1] 18258 if !(is16BitInt(t)) { 18259 break 18260 } 18261 v.reset(OpAMD64MOVWload) 18262 v.AddArg(ptr) 18263 v.AddArg(mem) 18264 return true 18265 } 18266 // match: (Load <t> ptr mem) 18267 // cond: (t.IsBoolean() || is8BitInt(t)) 18268 // result: (MOVBload ptr mem) 18269 for { 18270 t := v.Type 18271 ptr := v.Args[0] 18272 mem := v.Args[1] 18273 if !(t.IsBoolean() || is8BitInt(t)) { 18274 break 18275 } 18276 v.reset(OpAMD64MOVBload) 18277 v.AddArg(ptr) 18278 v.AddArg(mem) 18279 return true 18280 } 18281 // match: (Load <t> ptr mem) 18282 // cond: is32BitFloat(t) 18283 // result: (MOVSSload ptr mem) 18284 for { 18285 t := v.Type 18286 ptr := v.Args[0] 18287 mem := v.Args[1] 18288 if !(is32BitFloat(t)) { 18289 break 18290 } 18291 v.reset(OpAMD64MOVSSload) 18292 v.AddArg(ptr) 18293 v.AddArg(mem) 18294 return true 18295 } 18296 // match: (Load <t> ptr mem) 18297 // cond: is64BitFloat(t) 18298 // result: (MOVSDload ptr mem) 18299 for { 18300 t := v.Type 18301 ptr := v.Args[0] 18302 mem := v.Args[1] 18303 if !(is64BitFloat(t)) { 18304 break 18305 } 18306 v.reset(OpAMD64MOVSDload) 18307 v.AddArg(ptr) 18308 v.AddArg(mem) 18309 return true 18310 } 18311 return false 18312 } 18313 func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool { 18314 b := v.Block 18315 _ = b 18316 // match: (Lsh16x16 <t> x y) 18317 // cond: 18318 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 18319 for { 18320 t := v.Type 18321 x := v.Args[0] 18322 y := v.Args[1] 18323 v.reset(OpAMD64ANDL) 18324 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18325 v0.AddArg(x) 18326 v0.AddArg(y) 18327 v.AddArg(v0) 18328 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18329 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 18330 v2.AuxInt = 32 18331 v2.AddArg(y) 18332 v1.AddArg(v2) 18333 v.AddArg(v1) 18334 return true 18335 } 18336 } 18337 func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool { 18338 b := v.Block 18339 _ = b 18340 // match: (Lsh16x32 <t> x y) 18341 // cond: 18342 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 18343 for { 18344 t := v.Type 18345 x := v.Args[0] 18346 y := v.Args[1] 18347 v.reset(OpAMD64ANDL) 18348 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18349 v0.AddArg(x) 18350 v0.AddArg(y) 18351 v.AddArg(v0) 18352 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18353 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 18354 v2.AuxInt = 32 18355 v2.AddArg(y) 18356 v1.AddArg(v2) 18357 v.AddArg(v1) 18358 return true 18359 } 18360 } 18361 func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool { 18362 b := v.Block 18363 _ = b 18364 // match: (Lsh16x64 <t> x y) 18365 // cond: 18366 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 18367 for { 18368 t := v.Type 18369 x := v.Args[0] 18370 y := v.Args[1] 18371 v.reset(OpAMD64ANDL) 18372 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18373 v0.AddArg(x) 18374 v0.AddArg(y) 18375 v.AddArg(v0) 18376 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18377 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 18378 v2.AuxInt = 32 18379 v2.AddArg(y) 18380 v1.AddArg(v2) 18381 v.AddArg(v1) 18382 return true 18383 } 18384 } 18385 func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool { 18386 b := v.Block 18387 _ = b 18388 // match: (Lsh16x8 <t> x y) 18389 // cond: 18390 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 18391 for { 18392 t := v.Type 18393 x := v.Args[0] 18394 y := v.Args[1] 18395 v.reset(OpAMD64ANDL) 18396 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18397 v0.AddArg(x) 18398 v0.AddArg(y) 18399 v.AddArg(v0) 18400 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18401 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 18402 v2.AuxInt = 32 18403 v2.AddArg(y) 18404 v1.AddArg(v2) 18405 v.AddArg(v1) 18406 return true 18407 } 18408 } 18409 func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool { 18410 b := v.Block 18411 _ = b 18412 // match: (Lsh32x16 <t> x y) 18413 // cond: 18414 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 18415 for { 18416 t := v.Type 18417 x := v.Args[0] 18418 y := v.Args[1] 18419 v.reset(OpAMD64ANDL) 18420 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18421 v0.AddArg(x) 18422 v0.AddArg(y) 18423 v.AddArg(v0) 18424 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18425 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 18426 v2.AuxInt = 32 18427 v2.AddArg(y) 18428 v1.AddArg(v2) 18429 v.AddArg(v1) 18430 return true 18431 } 18432 } 18433 func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool { 18434 b := v.Block 18435 _ = b 18436 // match: (Lsh32x32 <t> x y) 18437 // cond: 18438 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 18439 for { 18440 t := v.Type 18441 x := v.Args[0] 18442 y := v.Args[1] 18443 v.reset(OpAMD64ANDL) 18444 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18445 v0.AddArg(x) 18446 v0.AddArg(y) 18447 v.AddArg(v0) 18448 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18449 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 18450 v2.AuxInt = 32 18451 v2.AddArg(y) 18452 v1.AddArg(v2) 18453 v.AddArg(v1) 18454 return true 18455 } 18456 } 18457 func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool { 18458 b := v.Block 18459 _ = b 18460 // match: (Lsh32x64 <t> x y) 18461 // cond: 18462 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 18463 for { 18464 t := v.Type 18465 x := v.Args[0] 18466 y := v.Args[1] 18467 v.reset(OpAMD64ANDL) 18468 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18469 v0.AddArg(x) 18470 v0.AddArg(y) 18471 v.AddArg(v0) 18472 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18473 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 18474 v2.AuxInt = 32 18475 v2.AddArg(y) 18476 v1.AddArg(v2) 18477 v.AddArg(v1) 18478 return true 18479 } 18480 } 18481 func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool { 18482 b := v.Block 18483 _ = b 18484 // match: (Lsh32x8 <t> x y) 18485 // cond: 18486 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 18487 for { 18488 t := v.Type 18489 x := v.Args[0] 18490 y := v.Args[1] 18491 v.reset(OpAMD64ANDL) 18492 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18493 v0.AddArg(x) 18494 v0.AddArg(y) 18495 v.AddArg(v0) 18496 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18497 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 18498 v2.AuxInt = 32 18499 v2.AddArg(y) 18500 v1.AddArg(v2) 18501 v.AddArg(v1) 18502 return true 18503 } 18504 } 18505 func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool { 18506 b := v.Block 18507 _ = b 18508 // match: (Lsh64x16 <t> x y) 18509 // cond: 18510 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 18511 for { 18512 t := v.Type 18513 x := v.Args[0] 18514 y := v.Args[1] 18515 v.reset(OpAMD64ANDQ) 18516 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 18517 v0.AddArg(x) 18518 v0.AddArg(y) 18519 v.AddArg(v0) 18520 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 18521 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 18522 v2.AuxInt = 64 18523 v2.AddArg(y) 18524 v1.AddArg(v2) 18525 v.AddArg(v1) 18526 return true 18527 } 18528 } 18529 func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool { 18530 b := v.Block 18531 _ = b 18532 // match: (Lsh64x32 <t> x y) 18533 // cond: 18534 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 18535 for { 18536 t := v.Type 18537 x := v.Args[0] 18538 y := v.Args[1] 18539 v.reset(OpAMD64ANDQ) 18540 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 18541 v0.AddArg(x) 18542 v0.AddArg(y) 18543 v.AddArg(v0) 18544 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 18545 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 18546 v2.AuxInt = 64 18547 v2.AddArg(y) 18548 v1.AddArg(v2) 18549 v.AddArg(v1) 18550 return true 18551 } 18552 } 18553 func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool { 18554 b := v.Block 18555 _ = b 18556 // match: (Lsh64x64 <t> x y) 18557 // cond: 18558 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 18559 for { 18560 t := v.Type 18561 x := v.Args[0] 18562 y := v.Args[1] 18563 v.reset(OpAMD64ANDQ) 18564 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 18565 v0.AddArg(x) 18566 v0.AddArg(y) 18567 v.AddArg(v0) 18568 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 18569 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 18570 v2.AuxInt = 64 18571 v2.AddArg(y) 18572 v1.AddArg(v2) 18573 v.AddArg(v1) 18574 return true 18575 } 18576 } 18577 func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool { 18578 b := v.Block 18579 _ = b 18580 // match: (Lsh64x8 <t> x y) 18581 // cond: 18582 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 18583 for { 18584 t := v.Type 18585 x := v.Args[0] 18586 y := v.Args[1] 18587 v.reset(OpAMD64ANDQ) 18588 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 18589 v0.AddArg(x) 18590 v0.AddArg(y) 18591 v.AddArg(v0) 18592 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 18593 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 18594 v2.AuxInt = 64 18595 v2.AddArg(y) 18596 v1.AddArg(v2) 18597 v.AddArg(v1) 18598 return true 18599 } 18600 } 18601 func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool { 18602 b := v.Block 18603 _ = b 18604 // match: (Lsh8x16 <t> x y) 18605 // cond: 18606 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 18607 for { 18608 t := v.Type 18609 x := v.Args[0] 18610 y := v.Args[1] 18611 v.reset(OpAMD64ANDL) 18612 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18613 v0.AddArg(x) 18614 v0.AddArg(y) 18615 v.AddArg(v0) 18616 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18617 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 18618 v2.AuxInt = 32 18619 v2.AddArg(y) 18620 v1.AddArg(v2) 18621 v.AddArg(v1) 18622 return true 18623 } 18624 } 18625 func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool { 18626 b := v.Block 18627 _ = b 18628 // match: (Lsh8x32 <t> x y) 18629 // cond: 18630 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 18631 for { 18632 t := v.Type 18633 x := v.Args[0] 18634 y := v.Args[1] 18635 v.reset(OpAMD64ANDL) 18636 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18637 v0.AddArg(x) 18638 v0.AddArg(y) 18639 v.AddArg(v0) 18640 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18641 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 18642 v2.AuxInt = 32 18643 v2.AddArg(y) 18644 v1.AddArg(v2) 18645 v.AddArg(v1) 18646 return true 18647 } 18648 } 18649 func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool { 18650 b := v.Block 18651 _ = b 18652 // match: (Lsh8x64 <t> x y) 18653 // cond: 18654 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 18655 for { 18656 t := v.Type 18657 x := v.Args[0] 18658 y := v.Args[1] 18659 v.reset(OpAMD64ANDL) 18660 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18661 v0.AddArg(x) 18662 v0.AddArg(y) 18663 v.AddArg(v0) 18664 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18665 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 18666 v2.AuxInt = 32 18667 v2.AddArg(y) 18668 v1.AddArg(v2) 18669 v.AddArg(v1) 18670 return true 18671 } 18672 } 18673 func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool { 18674 b := v.Block 18675 _ = b 18676 // match: (Lsh8x8 <t> x y) 18677 // cond: 18678 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 18679 for { 18680 t := v.Type 18681 x := v.Args[0] 18682 y := v.Args[1] 18683 v.reset(OpAMD64ANDL) 18684 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 18685 v0.AddArg(x) 18686 v0.AddArg(y) 18687 v.AddArg(v0) 18688 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 18689 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 18690 v2.AuxInt = 32 18691 v2.AddArg(y) 18692 v1.AddArg(v2) 18693 v.AddArg(v1) 18694 return true 18695 } 18696 } 18697 func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { 18698 b := v.Block 18699 _ = b 18700 // match: (Mod16 x y) 18701 // cond: 18702 // result: (Select1 (DIVW x y)) 18703 for { 18704 x := v.Args[0] 18705 y := v.Args[1] 18706 v.reset(OpSelect1) 18707 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 18708 v0.AddArg(x) 18709 v0.AddArg(y) 18710 v.AddArg(v0) 18711 return true 18712 } 18713 } 18714 func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool { 18715 b := v.Block 18716 _ = b 18717 // match: (Mod16u x y) 18718 // cond: 18719 // result: (Select1 (DIVWU x y)) 18720 for { 18721 x := v.Args[0] 18722 y := v.Args[1] 18723 v.reset(OpSelect1) 18724 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 18725 v0.AddArg(x) 18726 v0.AddArg(y) 18727 v.AddArg(v0) 18728 return true 18729 } 18730 } 18731 func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool { 18732 b := v.Block 18733 _ = b 18734 // match: (Mod32 x y) 18735 // cond: 18736 // result: (Select1 (DIVL x y)) 18737 for { 18738 x := v.Args[0] 18739 y := v.Args[1] 18740 v.reset(OpSelect1) 18741 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32())) 18742 v0.AddArg(x) 18743 v0.AddArg(y) 18744 v.AddArg(v0) 18745 return true 18746 } 18747 } 18748 func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool { 18749 b := v.Block 18750 _ = b 18751 // match: (Mod32u x y) 18752 // cond: 18753 // result: (Select1 (DIVLU x y)) 18754 for { 18755 x := v.Args[0] 18756 y := v.Args[1] 18757 v.reset(OpSelect1) 18758 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32())) 18759 v0.AddArg(x) 18760 v0.AddArg(y) 18761 v.AddArg(v0) 18762 return true 18763 } 18764 } 18765 func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool { 18766 b := v.Block 18767 _ = b 18768 // match: (Mod64 x y) 18769 // cond: 18770 // result: (Select1 (DIVQ x y)) 18771 for { 18772 x := v.Args[0] 18773 y := v.Args[1] 18774 v.reset(OpSelect1) 18775 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64())) 18776 v0.AddArg(x) 18777 v0.AddArg(y) 18778 v.AddArg(v0) 18779 return true 18780 } 18781 } 18782 func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool { 18783 b := v.Block 18784 _ = b 18785 // match: (Mod64u x y) 18786 // cond: 18787 // result: (Select1 (DIVQU x y)) 18788 for { 18789 x := v.Args[0] 18790 y := v.Args[1] 18791 v.reset(OpSelect1) 18792 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64())) 18793 v0.AddArg(x) 18794 v0.AddArg(y) 18795 v.AddArg(v0) 18796 return true 18797 } 18798 } 18799 func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool { 18800 b := v.Block 18801 _ = b 18802 // match: (Mod8 x y) 18803 // cond: 18804 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 18805 for { 18806 x := v.Args[0] 18807 y := v.Args[1] 18808 v.reset(OpSelect1) 18809 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16())) 18810 v1 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16()) 18811 v1.AddArg(x) 18812 v0.AddArg(v1) 18813 v2 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16()) 18814 v2.AddArg(y) 18815 v0.AddArg(v2) 18816 v.AddArg(v0) 18817 return true 18818 } 18819 } 18820 func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool { 18821 b := v.Block 18822 _ = b 18823 // match: (Mod8u x y) 18824 // cond: 18825 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 18826 for { 18827 x := v.Args[0] 18828 y := v.Args[1] 18829 v.reset(OpSelect1) 18830 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16())) 18831 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16()) 18832 v1.AddArg(x) 18833 v0.AddArg(v1) 18834 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16()) 18835 v2.AddArg(y) 18836 v0.AddArg(v2) 18837 v.AddArg(v0) 18838 return true 18839 } 18840 } 18841 func rewriteValueAMD64_OpMove(v *Value, config *Config) bool { 18842 b := v.Block 18843 _ = b 18844 // match: (Move [s] _ _ mem) 18845 // cond: SizeAndAlign(s).Size() == 0 18846 // result: mem 18847 for { 18848 s := v.AuxInt 18849 mem := v.Args[2] 18850 if !(SizeAndAlign(s).Size() == 0) { 18851 break 18852 } 18853 v.reset(OpCopy) 18854 v.Type = mem.Type 18855 v.AddArg(mem) 18856 return true 18857 } 18858 // match: (Move [s] dst src mem) 18859 // cond: SizeAndAlign(s).Size() == 1 18860 // result: (MOVBstore dst (MOVBload src mem) mem) 18861 for { 18862 s := v.AuxInt 18863 dst := v.Args[0] 18864 src := v.Args[1] 18865 mem := v.Args[2] 18866 if !(SizeAndAlign(s).Size() == 1) { 18867 break 18868 } 18869 v.reset(OpAMD64MOVBstore) 18870 v.AddArg(dst) 18871 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8()) 18872 v0.AddArg(src) 18873 v0.AddArg(mem) 18874 v.AddArg(v0) 18875 v.AddArg(mem) 18876 return true 18877 } 18878 // match: (Move [s] dst src mem) 18879 // cond: SizeAndAlign(s).Size() == 2 18880 // result: (MOVWstore dst (MOVWload src mem) mem) 18881 for { 18882 s := v.AuxInt 18883 dst := v.Args[0] 18884 src := v.Args[1] 18885 mem := v.Args[2] 18886 if !(SizeAndAlign(s).Size() == 2) { 18887 break 18888 } 18889 v.reset(OpAMD64MOVWstore) 18890 v.AddArg(dst) 18891 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 18892 v0.AddArg(src) 18893 v0.AddArg(mem) 18894 v.AddArg(v0) 18895 v.AddArg(mem) 18896 return true 18897 } 18898 // match: (Move [s] dst src mem) 18899 // cond: SizeAndAlign(s).Size() == 4 18900 // result: (MOVLstore dst (MOVLload src mem) mem) 18901 for { 18902 s := v.AuxInt 18903 dst := v.Args[0] 18904 src := v.Args[1] 18905 mem := v.Args[2] 18906 if !(SizeAndAlign(s).Size() == 4) { 18907 break 18908 } 18909 v.reset(OpAMD64MOVLstore) 18910 v.AddArg(dst) 18911 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 18912 v0.AddArg(src) 18913 v0.AddArg(mem) 18914 v.AddArg(v0) 18915 v.AddArg(mem) 18916 return true 18917 } 18918 // match: (Move [s] dst src mem) 18919 // cond: SizeAndAlign(s).Size() == 8 18920 // result: (MOVQstore dst (MOVQload src mem) mem) 18921 for { 18922 s := v.AuxInt 18923 dst := v.Args[0] 18924 src := v.Args[1] 18925 mem := v.Args[2] 18926 if !(SizeAndAlign(s).Size() == 8) { 18927 break 18928 } 18929 v.reset(OpAMD64MOVQstore) 18930 v.AddArg(dst) 18931 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 18932 v0.AddArg(src) 18933 v0.AddArg(mem) 18934 v.AddArg(v0) 18935 v.AddArg(mem) 18936 return true 18937 } 18938 // match: (Move [s] dst src mem) 18939 // cond: SizeAndAlign(s).Size() == 16 18940 // result: (MOVOstore dst (MOVOload src mem) mem) 18941 for { 18942 s := v.AuxInt 18943 dst := v.Args[0] 18944 src := v.Args[1] 18945 mem := v.Args[2] 18946 if !(SizeAndAlign(s).Size() == 16) { 18947 break 18948 } 18949 v.reset(OpAMD64MOVOstore) 18950 v.AddArg(dst) 18951 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128) 18952 v0.AddArg(src) 18953 v0.AddArg(mem) 18954 v.AddArg(v0) 18955 v.AddArg(mem) 18956 return true 18957 } 18958 // match: (Move [s] dst src mem) 18959 // cond: SizeAndAlign(s).Size() == 3 18960 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 18961 for { 18962 s := v.AuxInt 18963 dst := v.Args[0] 18964 src := v.Args[1] 18965 mem := v.Args[2] 18966 if !(SizeAndAlign(s).Size() == 3) { 18967 break 18968 } 18969 v.reset(OpAMD64MOVBstore) 18970 v.AuxInt = 2 18971 v.AddArg(dst) 18972 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8()) 18973 v0.AuxInt = 2 18974 v0.AddArg(src) 18975 v0.AddArg(mem) 18976 v.AddArg(v0) 18977 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, TypeMem) 18978 v1.AddArg(dst) 18979 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 18980 v2.AddArg(src) 18981 v2.AddArg(mem) 18982 v1.AddArg(v2) 18983 v1.AddArg(mem) 18984 v.AddArg(v1) 18985 return true 18986 } 18987 // match: (Move [s] dst src mem) 18988 // cond: SizeAndAlign(s).Size() == 5 18989 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 18990 for { 18991 s := v.AuxInt 18992 dst := v.Args[0] 18993 src := v.Args[1] 18994 mem := v.Args[2] 18995 if !(SizeAndAlign(s).Size() == 5) { 18996 break 18997 } 18998 v.reset(OpAMD64MOVBstore) 18999 v.AuxInt = 4 19000 v.AddArg(dst) 19001 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8()) 19002 v0.AuxInt = 4 19003 v0.AddArg(src) 19004 v0.AddArg(mem) 19005 v.AddArg(v0) 19006 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem) 19007 v1.AddArg(dst) 19008 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 19009 v2.AddArg(src) 19010 v2.AddArg(mem) 19011 v1.AddArg(v2) 19012 v1.AddArg(mem) 19013 v.AddArg(v1) 19014 return true 19015 } 19016 // match: (Move [s] dst src mem) 19017 // cond: SizeAndAlign(s).Size() == 6 19018 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 19019 for { 19020 s := v.AuxInt 19021 dst := v.Args[0] 19022 src := v.Args[1] 19023 mem := v.Args[2] 19024 if !(SizeAndAlign(s).Size() == 6) { 19025 break 19026 } 19027 v.reset(OpAMD64MOVWstore) 19028 v.AuxInt = 4 19029 v.AddArg(dst) 19030 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16()) 19031 v0.AuxInt = 4 19032 v0.AddArg(src) 19033 v0.AddArg(mem) 19034 v.AddArg(v0) 19035 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem) 19036 v1.AddArg(dst) 19037 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 19038 v2.AddArg(src) 19039 v2.AddArg(mem) 19040 v1.AddArg(v2) 19041 v1.AddArg(mem) 19042 v.AddArg(v1) 19043 return true 19044 } 19045 // match: (Move [s] dst src mem) 19046 // cond: SizeAndAlign(s).Size() == 7 19047 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 19048 for { 19049 s := v.AuxInt 19050 dst := v.Args[0] 19051 src := v.Args[1] 19052 mem := v.Args[2] 19053 if !(SizeAndAlign(s).Size() == 7) { 19054 break 19055 } 19056 v.reset(OpAMD64MOVLstore) 19057 v.AuxInt = 3 19058 v.AddArg(dst) 19059 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 19060 v0.AuxInt = 3 19061 v0.AddArg(src) 19062 v0.AddArg(mem) 19063 v.AddArg(v0) 19064 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem) 19065 v1.AddArg(dst) 19066 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32()) 19067 v2.AddArg(src) 19068 v2.AddArg(mem) 19069 v1.AddArg(v2) 19070 v1.AddArg(mem) 19071 v.AddArg(v1) 19072 return true 19073 } 19074 // match: (Move [s] dst src mem) 19075 // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 19076 // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 19077 for { 19078 s := v.AuxInt 19079 dst := v.Args[0] 19080 src := v.Args[1] 19081 mem := v.Args[2] 19082 if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) { 19083 break 19084 } 19085 v.reset(OpAMD64MOVQstore) 19086 v.AuxInt = SizeAndAlign(s).Size() - 8 19087 v.AddArg(dst) 19088 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 19089 v0.AuxInt = SizeAndAlign(s).Size() - 8 19090 v0.AddArg(src) 19091 v0.AddArg(mem) 19092 v.AddArg(v0) 19093 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem) 19094 v1.AddArg(dst) 19095 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 19096 v2.AddArg(src) 19097 v2.AddArg(mem) 19098 v1.AddArg(v2) 19099 v1.AddArg(mem) 19100 v.AddArg(v1) 19101 return true 19102 } 19103 // match: (Move [s] dst src mem) 19104 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 19105 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem)) 19106 for { 19107 s := v.AuxInt 19108 dst := v.Args[0] 19109 src := v.Args[1] 19110 mem := v.Args[2] 19111 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) { 19112 break 19113 } 19114 v.reset(OpMove) 19115 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 19116 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 19117 v0.AuxInt = SizeAndAlign(s).Size() % 16 19118 v0.AddArg(dst) 19119 v.AddArg(v0) 19120 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 19121 v1.AuxInt = SizeAndAlign(s).Size() % 16 19122 v1.AddArg(src) 19123 v.AddArg(v1) 19124 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem) 19125 v2.AddArg(dst) 19126 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64()) 19127 v3.AddArg(src) 19128 v3.AddArg(mem) 19129 v2.AddArg(v3) 19130 v2.AddArg(mem) 19131 v.AddArg(v2) 19132 return true 19133 } 19134 // match: (Move [s] dst src mem) 19135 // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 19136 // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem)) 19137 for { 19138 s := v.AuxInt 19139 dst := v.Args[0] 19140 src := v.Args[1] 19141 mem := v.Args[2] 19142 if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) { 19143 break 19144 } 19145 v.reset(OpMove) 19146 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16 19147 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 19148 v0.AuxInt = SizeAndAlign(s).Size() % 16 19149 v0.AddArg(dst) 19150 v.AddArg(v0) 19151 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 19152 v1.AuxInt = SizeAndAlign(s).Size() % 16 19153 v1.AddArg(src) 19154 v.AddArg(v1) 19155 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, TypeMem) 19156 v2.AddArg(dst) 19157 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128) 19158 v3.AddArg(src) 19159 v3.AddArg(mem) 19160 v2.AddArg(v3) 19161 v2.AddArg(mem) 19162 v.AddArg(v2) 19163 return true 19164 } 19165 // match: (Move [s] dst src mem) 19166 // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 19167 // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem) 19168 for { 19169 s := v.AuxInt 19170 dst := v.Args[0] 19171 src := v.Args[1] 19172 mem := v.Args[2] 19173 if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 19174 break 19175 } 19176 v.reset(OpAMD64DUFFCOPY) 19177 v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16) 19178 v.AddArg(dst) 19179 v.AddArg(src) 19180 v.AddArg(mem) 19181 return true 19182 } 19183 // match: (Move [s] dst src mem) 19184 // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 19185 // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem) 19186 for { 19187 s := v.AuxInt 19188 dst := v.Args[0] 19189 src := v.Args[1] 19190 mem := v.Args[2] 19191 if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) { 19192 break 19193 } 19194 v.reset(OpAMD64REPMOVSQ) 19195 v.AddArg(dst) 19196 v.AddArg(src) 19197 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 19198 v0.AuxInt = SizeAndAlign(s).Size() / 8 19199 v.AddArg(v0) 19200 v.AddArg(mem) 19201 return true 19202 } 19203 return false 19204 } 19205 func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { 19206 b := v.Block 19207 _ = b 19208 // match: (Mul16 x y) 19209 // cond: 19210 // result: (MULL x y) 19211 for { 19212 x := v.Args[0] 19213 y := v.Args[1] 19214 v.reset(OpAMD64MULL) 19215 v.AddArg(x) 19216 v.AddArg(y) 19217 return true 19218 } 19219 } 19220 func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool { 19221 b := v.Block 19222 _ = b 19223 // match: (Mul32 x y) 19224 // cond: 19225 // result: (MULL x y) 19226 for { 19227 x := v.Args[0] 19228 y := v.Args[1] 19229 v.reset(OpAMD64MULL) 19230 v.AddArg(x) 19231 v.AddArg(y) 19232 return true 19233 } 19234 } 19235 func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool { 19236 b := v.Block 19237 _ = b 19238 // match: (Mul32F x y) 19239 // cond: 19240 // result: (MULSS x y) 19241 for { 19242 x := v.Args[0] 19243 y := v.Args[1] 19244 v.reset(OpAMD64MULSS) 19245 v.AddArg(x) 19246 v.AddArg(y) 19247 return true 19248 } 19249 } 19250 func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool { 19251 b := v.Block 19252 _ = b 19253 // match: (Mul64 x y) 19254 // cond: 19255 // result: (MULQ x y) 19256 for { 19257 x := v.Args[0] 19258 y := v.Args[1] 19259 v.reset(OpAMD64MULQ) 19260 v.AddArg(x) 19261 v.AddArg(y) 19262 return true 19263 } 19264 } 19265 func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool { 19266 b := v.Block 19267 _ = b 19268 // match: (Mul64F x y) 19269 // cond: 19270 // result: (MULSD x y) 19271 for { 19272 x := v.Args[0] 19273 y := v.Args[1] 19274 v.reset(OpAMD64MULSD) 19275 v.AddArg(x) 19276 v.AddArg(y) 19277 return true 19278 } 19279 } 19280 func rewriteValueAMD64_OpMul64uhilo(v *Value, config *Config) bool { 19281 b := v.Block 19282 _ = b 19283 // match: (Mul64uhilo x y) 19284 // cond: 19285 // result: (MULQU2 x y) 19286 for { 19287 x := v.Args[0] 19288 y := v.Args[1] 19289 v.reset(OpAMD64MULQU2) 19290 v.AddArg(x) 19291 v.AddArg(y) 19292 return true 19293 } 19294 } 19295 func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { 19296 b := v.Block 19297 _ = b 19298 // match: (Mul8 x y) 19299 // cond: 19300 // result: (MULL x y) 19301 for { 19302 x := v.Args[0] 19303 y := v.Args[1] 19304 v.reset(OpAMD64MULL) 19305 v.AddArg(x) 19306 v.AddArg(y) 19307 return true 19308 } 19309 } 19310 func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { 19311 b := v.Block 19312 _ = b 19313 // match: (Neg16 x) 19314 // cond: 19315 // result: (NEGL x) 19316 for { 19317 x := v.Args[0] 19318 v.reset(OpAMD64NEGL) 19319 v.AddArg(x) 19320 return true 19321 } 19322 } 19323 func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool { 19324 b := v.Block 19325 _ = b 19326 // match: (Neg32 x) 19327 // cond: 19328 // result: (NEGL x) 19329 for { 19330 x := v.Args[0] 19331 v.reset(OpAMD64NEGL) 19332 v.AddArg(x) 19333 return true 19334 } 19335 } 19336 func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool { 19337 b := v.Block 19338 _ = b 19339 // match: (Neg32F x) 19340 // cond: 19341 // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))])) 19342 for { 19343 x := v.Args[0] 19344 v.reset(OpAMD64PXOR) 19345 v.AddArg(x) 19346 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, config.Frontend().TypeFloat32()) 19347 v0.AuxInt = f2i(math.Copysign(0, -1)) 19348 v.AddArg(v0) 19349 return true 19350 } 19351 } 19352 func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool { 19353 b := v.Block 19354 _ = b 19355 // match: (Neg64 x) 19356 // cond: 19357 // result: (NEGQ x) 19358 for { 19359 x := v.Args[0] 19360 v.reset(OpAMD64NEGQ) 19361 v.AddArg(x) 19362 return true 19363 } 19364 } 19365 func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool { 19366 b := v.Block 19367 _ = b 19368 // match: (Neg64F x) 19369 // cond: 19370 // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))])) 19371 for { 19372 x := v.Args[0] 19373 v.reset(OpAMD64PXOR) 19374 v.AddArg(x) 19375 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, config.Frontend().TypeFloat64()) 19376 v0.AuxInt = f2i(math.Copysign(0, -1)) 19377 v.AddArg(v0) 19378 return true 19379 } 19380 } 19381 func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { 19382 b := v.Block 19383 _ = b 19384 // match: (Neg8 x) 19385 // cond: 19386 // result: (NEGL x) 19387 for { 19388 x := v.Args[0] 19389 v.reset(OpAMD64NEGL) 19390 v.AddArg(x) 19391 return true 19392 } 19393 } 19394 func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool { 19395 b := v.Block 19396 _ = b 19397 // match: (Neq16 x y) 19398 // cond: 19399 // result: (SETNE (CMPW x y)) 19400 for { 19401 x := v.Args[0] 19402 y := v.Args[1] 19403 v.reset(OpAMD64SETNE) 19404 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags) 19405 v0.AddArg(x) 19406 v0.AddArg(y) 19407 v.AddArg(v0) 19408 return true 19409 } 19410 } 19411 func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool { 19412 b := v.Block 19413 _ = b 19414 // match: (Neq32 x y) 19415 // cond: 19416 // result: (SETNE (CMPL x y)) 19417 for { 19418 x := v.Args[0] 19419 y := v.Args[1] 19420 v.reset(OpAMD64SETNE) 19421 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 19422 v0.AddArg(x) 19423 v0.AddArg(y) 19424 v.AddArg(v0) 19425 return true 19426 } 19427 } 19428 func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool { 19429 b := v.Block 19430 _ = b 19431 // match: (Neq32F x y) 19432 // cond: 19433 // result: (SETNEF (UCOMISS x y)) 19434 for { 19435 x := v.Args[0] 19436 y := v.Args[1] 19437 v.reset(OpAMD64SETNEF) 19438 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags) 19439 v0.AddArg(x) 19440 v0.AddArg(y) 19441 v.AddArg(v0) 19442 return true 19443 } 19444 } 19445 func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool { 19446 b := v.Block 19447 _ = b 19448 // match: (Neq64 x y) 19449 // cond: 19450 // result: (SETNE (CMPQ x y)) 19451 for { 19452 x := v.Args[0] 19453 y := v.Args[1] 19454 v.reset(OpAMD64SETNE) 19455 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 19456 v0.AddArg(x) 19457 v0.AddArg(y) 19458 v.AddArg(v0) 19459 return true 19460 } 19461 } 19462 func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool { 19463 b := v.Block 19464 _ = b 19465 // match: (Neq64F x y) 19466 // cond: 19467 // result: (SETNEF (UCOMISD x y)) 19468 for { 19469 x := v.Args[0] 19470 y := v.Args[1] 19471 v.reset(OpAMD64SETNEF) 19472 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags) 19473 v0.AddArg(x) 19474 v0.AddArg(y) 19475 v.AddArg(v0) 19476 return true 19477 } 19478 } 19479 func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool { 19480 b := v.Block 19481 _ = b 19482 // match: (Neq8 x y) 19483 // cond: 19484 // result: (SETNE (CMPB x y)) 19485 for { 19486 x := v.Args[0] 19487 y := v.Args[1] 19488 v.reset(OpAMD64SETNE) 19489 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 19490 v0.AddArg(x) 19491 v0.AddArg(y) 19492 v.AddArg(v0) 19493 return true 19494 } 19495 } 19496 func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool { 19497 b := v.Block 19498 _ = b 19499 // match: (NeqB x y) 19500 // cond: 19501 // result: (SETNE (CMPB x y)) 19502 for { 19503 x := v.Args[0] 19504 y := v.Args[1] 19505 v.reset(OpAMD64SETNE) 19506 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags) 19507 v0.AddArg(x) 19508 v0.AddArg(y) 19509 v.AddArg(v0) 19510 return true 19511 } 19512 } 19513 func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool { 19514 b := v.Block 19515 _ = b 19516 // match: (NeqPtr x y) 19517 // cond: config.PtrSize == 8 19518 // result: (SETNE (CMPQ x y)) 19519 for { 19520 x := v.Args[0] 19521 y := v.Args[1] 19522 if !(config.PtrSize == 8) { 19523 break 19524 } 19525 v.reset(OpAMD64SETNE) 19526 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags) 19527 v0.AddArg(x) 19528 v0.AddArg(y) 19529 v.AddArg(v0) 19530 return true 19531 } 19532 // match: (NeqPtr x y) 19533 // cond: config.PtrSize == 4 19534 // result: (SETNE (CMPL x y)) 19535 for { 19536 x := v.Args[0] 19537 y := v.Args[1] 19538 if !(config.PtrSize == 4) { 19539 break 19540 } 19541 v.reset(OpAMD64SETNE) 19542 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags) 19543 v0.AddArg(x) 19544 v0.AddArg(y) 19545 v.AddArg(v0) 19546 return true 19547 } 19548 return false 19549 } 19550 func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool { 19551 b := v.Block 19552 _ = b 19553 // match: (NilCheck ptr mem) 19554 // cond: 19555 // result: (LoweredNilCheck ptr mem) 19556 for { 19557 ptr := v.Args[0] 19558 mem := v.Args[1] 19559 v.reset(OpAMD64LoweredNilCheck) 19560 v.AddArg(ptr) 19561 v.AddArg(mem) 19562 return true 19563 } 19564 } 19565 func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { 19566 b := v.Block 19567 _ = b 19568 // match: (Not x) 19569 // cond: 19570 // result: (XORLconst [1] x) 19571 for { 19572 x := v.Args[0] 19573 v.reset(OpAMD64XORLconst) 19574 v.AuxInt = 1 19575 v.AddArg(x) 19576 return true 19577 } 19578 } 19579 func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { 19580 b := v.Block 19581 _ = b 19582 // match: (OffPtr [off] ptr) 19583 // cond: config.PtrSize == 8 && is32Bit(off) 19584 // result: (ADDQconst [off] ptr) 19585 for { 19586 off := v.AuxInt 19587 ptr := v.Args[0] 19588 if !(config.PtrSize == 8 && is32Bit(off)) { 19589 break 19590 } 19591 v.reset(OpAMD64ADDQconst) 19592 v.AuxInt = off 19593 v.AddArg(ptr) 19594 return true 19595 } 19596 // match: (OffPtr [off] ptr) 19597 // cond: config.PtrSize == 8 19598 // result: (ADDQ (MOVQconst [off]) ptr) 19599 for { 19600 off := v.AuxInt 19601 ptr := v.Args[0] 19602 if !(config.PtrSize == 8) { 19603 break 19604 } 19605 v.reset(OpAMD64ADDQ) 19606 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 19607 v0.AuxInt = off 19608 v.AddArg(v0) 19609 v.AddArg(ptr) 19610 return true 19611 } 19612 // match: (OffPtr [off] ptr) 19613 // cond: config.PtrSize == 4 19614 // result: (ADDLconst [off] ptr) 19615 for { 19616 off := v.AuxInt 19617 ptr := v.Args[0] 19618 if !(config.PtrSize == 4) { 19619 break 19620 } 19621 v.reset(OpAMD64ADDLconst) 19622 v.AuxInt = off 19623 v.AddArg(ptr) 19624 return true 19625 } 19626 return false 19627 } 19628 func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { 19629 b := v.Block 19630 _ = b 19631 // match: (Or16 x y) 19632 // cond: 19633 // result: (ORL x y) 19634 for { 19635 x := v.Args[0] 19636 y := v.Args[1] 19637 v.reset(OpAMD64ORL) 19638 v.AddArg(x) 19639 v.AddArg(y) 19640 return true 19641 } 19642 } 19643 func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool { 19644 b := v.Block 19645 _ = b 19646 // match: (Or32 x y) 19647 // cond: 19648 // result: (ORL x y) 19649 for { 19650 x := v.Args[0] 19651 y := v.Args[1] 19652 v.reset(OpAMD64ORL) 19653 v.AddArg(x) 19654 v.AddArg(y) 19655 return true 19656 } 19657 } 19658 func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool { 19659 b := v.Block 19660 _ = b 19661 // match: (Or64 x y) 19662 // cond: 19663 // result: (ORQ x y) 19664 for { 19665 x := v.Args[0] 19666 y := v.Args[1] 19667 v.reset(OpAMD64ORQ) 19668 v.AddArg(x) 19669 v.AddArg(y) 19670 return true 19671 } 19672 } 19673 func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { 19674 b := v.Block 19675 _ = b 19676 // match: (Or8 x y) 19677 // cond: 19678 // result: (ORL x y) 19679 for { 19680 x := v.Args[0] 19681 y := v.Args[1] 19682 v.reset(OpAMD64ORL) 19683 v.AddArg(x) 19684 v.AddArg(y) 19685 return true 19686 } 19687 } 19688 func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool { 19689 b := v.Block 19690 _ = b 19691 // match: (OrB x y) 19692 // cond: 19693 // result: (ORL x y) 19694 for { 19695 x := v.Args[0] 19696 y := v.Args[1] 19697 v.reset(OpAMD64ORL) 19698 v.AddArg(x) 19699 v.AddArg(y) 19700 return true 19701 } 19702 } 19703 func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { 19704 b := v.Block 19705 _ = b 19706 // match: (Rsh16Ux16 <t> x y) 19707 // cond: 19708 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 19709 for { 19710 t := v.Type 19711 x := v.Args[0] 19712 y := v.Args[1] 19713 v.reset(OpAMD64ANDL) 19714 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 19715 v0.AddArg(x) 19716 v0.AddArg(y) 19717 v.AddArg(v0) 19718 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19719 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19720 v2.AuxInt = 16 19721 v2.AddArg(y) 19722 v1.AddArg(v2) 19723 v.AddArg(v1) 19724 return true 19725 } 19726 } 19727 func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { 19728 b := v.Block 19729 _ = b 19730 // match: (Rsh16Ux32 <t> x y) 19731 // cond: 19732 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 19733 for { 19734 t := v.Type 19735 x := v.Args[0] 19736 y := v.Args[1] 19737 v.reset(OpAMD64ANDL) 19738 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 19739 v0.AddArg(x) 19740 v0.AddArg(y) 19741 v.AddArg(v0) 19742 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19743 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19744 v2.AuxInt = 16 19745 v2.AddArg(y) 19746 v1.AddArg(v2) 19747 v.AddArg(v1) 19748 return true 19749 } 19750 } 19751 func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool { 19752 b := v.Block 19753 _ = b 19754 // match: (Rsh16Ux64 <t> x y) 19755 // cond: 19756 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 19757 for { 19758 t := v.Type 19759 x := v.Args[0] 19760 y := v.Args[1] 19761 v.reset(OpAMD64ANDL) 19762 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 19763 v0.AddArg(x) 19764 v0.AddArg(y) 19765 v.AddArg(v0) 19766 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19767 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19768 v2.AuxInt = 16 19769 v2.AddArg(y) 19770 v1.AddArg(v2) 19771 v.AddArg(v1) 19772 return true 19773 } 19774 } 19775 func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool { 19776 b := v.Block 19777 _ = b 19778 // match: (Rsh16Ux8 <t> x y) 19779 // cond: 19780 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 19781 for { 19782 t := v.Type 19783 x := v.Args[0] 19784 y := v.Args[1] 19785 v.reset(OpAMD64ANDL) 19786 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 19787 v0.AddArg(x) 19788 v0.AddArg(y) 19789 v.AddArg(v0) 19790 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19791 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19792 v2.AuxInt = 16 19793 v2.AddArg(y) 19794 v1.AddArg(v2) 19795 v.AddArg(v1) 19796 return true 19797 } 19798 } 19799 func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { 19800 b := v.Block 19801 _ = b 19802 // match: (Rsh16x16 <t> x y) 19803 // cond: 19804 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) 19805 for { 19806 t := v.Type 19807 x := v.Args[0] 19808 y := v.Args[1] 19809 v.reset(OpAMD64SARW) 19810 v.Type = t 19811 v.AddArg(x) 19812 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19813 v0.AddArg(y) 19814 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19815 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19816 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19817 v3.AuxInt = 16 19818 v3.AddArg(y) 19819 v2.AddArg(v3) 19820 v1.AddArg(v2) 19821 v0.AddArg(v1) 19822 v.AddArg(v0) 19823 return true 19824 } 19825 } 19826 func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool { 19827 b := v.Block 19828 _ = b 19829 // match: (Rsh16x32 <t> x y) 19830 // cond: 19831 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) 19832 for { 19833 t := v.Type 19834 x := v.Args[0] 19835 y := v.Args[1] 19836 v.reset(OpAMD64SARW) 19837 v.Type = t 19838 v.AddArg(x) 19839 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19840 v0.AddArg(y) 19841 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19842 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19843 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19844 v3.AuxInt = 16 19845 v3.AddArg(y) 19846 v2.AddArg(v3) 19847 v1.AddArg(v2) 19848 v0.AddArg(v1) 19849 v.AddArg(v0) 19850 return true 19851 } 19852 } 19853 func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool { 19854 b := v.Block 19855 _ = b 19856 // match: (Rsh16x64 <t> x y) 19857 // cond: 19858 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) 19859 for { 19860 t := v.Type 19861 x := v.Args[0] 19862 y := v.Args[1] 19863 v.reset(OpAMD64SARW) 19864 v.Type = t 19865 v.AddArg(x) 19866 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 19867 v0.AddArg(y) 19868 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 19869 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 19870 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19871 v3.AuxInt = 16 19872 v3.AddArg(y) 19873 v2.AddArg(v3) 19874 v1.AddArg(v2) 19875 v0.AddArg(v1) 19876 v.AddArg(v0) 19877 return true 19878 } 19879 } 19880 func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { 19881 b := v.Block 19882 _ = b 19883 // match: (Rsh16x8 <t> x y) 19884 // cond: 19885 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) 19886 for { 19887 t := v.Type 19888 x := v.Args[0] 19889 y := v.Args[1] 19890 v.reset(OpAMD64SARW) 19891 v.Type = t 19892 v.AddArg(x) 19893 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 19894 v0.AddArg(y) 19895 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 19896 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 19897 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19898 v3.AuxInt = 16 19899 v3.AddArg(y) 19900 v2.AddArg(v3) 19901 v1.AddArg(v2) 19902 v0.AddArg(v1) 19903 v.AddArg(v0) 19904 return true 19905 } 19906 } 19907 func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool { 19908 b := v.Block 19909 _ = b 19910 // match: (Rsh32Ux16 <t> x y) 19911 // cond: 19912 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 19913 for { 19914 t := v.Type 19915 x := v.Args[0] 19916 y := v.Args[1] 19917 v.reset(OpAMD64ANDL) 19918 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 19919 v0.AddArg(x) 19920 v0.AddArg(y) 19921 v.AddArg(v0) 19922 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19923 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 19924 v2.AuxInt = 32 19925 v2.AddArg(y) 19926 v1.AddArg(v2) 19927 v.AddArg(v1) 19928 return true 19929 } 19930 } 19931 func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool { 19932 b := v.Block 19933 _ = b 19934 // match: (Rsh32Ux32 <t> x y) 19935 // cond: 19936 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 19937 for { 19938 t := v.Type 19939 x := v.Args[0] 19940 y := v.Args[1] 19941 v.reset(OpAMD64ANDL) 19942 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 19943 v0.AddArg(x) 19944 v0.AddArg(y) 19945 v.AddArg(v0) 19946 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19947 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 19948 v2.AuxInt = 32 19949 v2.AddArg(y) 19950 v1.AddArg(v2) 19951 v.AddArg(v1) 19952 return true 19953 } 19954 } 19955 func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool { 19956 b := v.Block 19957 _ = b 19958 // match: (Rsh32Ux64 <t> x y) 19959 // cond: 19960 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 19961 for { 19962 t := v.Type 19963 x := v.Args[0] 19964 y := v.Args[1] 19965 v.reset(OpAMD64ANDL) 19966 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 19967 v0.AddArg(x) 19968 v0.AddArg(y) 19969 v.AddArg(v0) 19970 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19971 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 19972 v2.AuxInt = 32 19973 v2.AddArg(y) 19974 v1.AddArg(v2) 19975 v.AddArg(v1) 19976 return true 19977 } 19978 } 19979 func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool { 19980 b := v.Block 19981 _ = b 19982 // match: (Rsh32Ux8 <t> x y) 19983 // cond: 19984 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 19985 for { 19986 t := v.Type 19987 x := v.Args[0] 19988 y := v.Args[1] 19989 v.reset(OpAMD64ANDL) 19990 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 19991 v0.AddArg(x) 19992 v0.AddArg(y) 19993 v.AddArg(v0) 19994 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 19995 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 19996 v2.AuxInt = 32 19997 v2.AddArg(y) 19998 v1.AddArg(v2) 19999 v.AddArg(v1) 20000 return true 20001 } 20002 } 20003 func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { 20004 b := v.Block 20005 _ = b 20006 // match: (Rsh32x16 <t> x y) 20007 // cond: 20008 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 20009 for { 20010 t := v.Type 20011 x := v.Args[0] 20012 y := v.Args[1] 20013 v.reset(OpAMD64SARL) 20014 v.Type = t 20015 v.AddArg(x) 20016 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20017 v0.AddArg(y) 20018 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20019 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20020 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 20021 v3.AuxInt = 32 20022 v3.AddArg(y) 20023 v2.AddArg(v3) 20024 v1.AddArg(v2) 20025 v0.AddArg(v1) 20026 v.AddArg(v0) 20027 return true 20028 } 20029 } 20030 func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool { 20031 b := v.Block 20032 _ = b 20033 // match: (Rsh32x32 <t> x y) 20034 // cond: 20035 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 20036 for { 20037 t := v.Type 20038 x := v.Args[0] 20039 y := v.Args[1] 20040 v.reset(OpAMD64SARL) 20041 v.Type = t 20042 v.AddArg(x) 20043 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20044 v0.AddArg(y) 20045 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20046 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20047 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 20048 v3.AuxInt = 32 20049 v3.AddArg(y) 20050 v2.AddArg(v3) 20051 v1.AddArg(v2) 20052 v0.AddArg(v1) 20053 v.AddArg(v0) 20054 return true 20055 } 20056 } 20057 func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool { 20058 b := v.Block 20059 _ = b 20060 // match: (Rsh32x64 <t> x y) 20061 // cond: 20062 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 20063 for { 20064 t := v.Type 20065 x := v.Args[0] 20066 y := v.Args[1] 20067 v.reset(OpAMD64SARL) 20068 v.Type = t 20069 v.AddArg(x) 20070 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 20071 v0.AddArg(y) 20072 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 20073 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 20074 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 20075 v3.AuxInt = 32 20076 v3.AddArg(y) 20077 v2.AddArg(v3) 20078 v1.AddArg(v2) 20079 v0.AddArg(v1) 20080 v.AddArg(v0) 20081 return true 20082 } 20083 } 20084 func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool { 20085 b := v.Block 20086 _ = b 20087 // match: (Rsh32x8 <t> x y) 20088 // cond: 20089 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 20090 for { 20091 t := v.Type 20092 x := v.Args[0] 20093 y := v.Args[1] 20094 v.reset(OpAMD64SARL) 20095 v.Type = t 20096 v.AddArg(x) 20097 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20098 v0.AddArg(y) 20099 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20100 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20101 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 20102 v3.AuxInt = 32 20103 v3.AddArg(y) 20104 v2.AddArg(v3) 20105 v1.AddArg(v2) 20106 v0.AddArg(v1) 20107 v.AddArg(v0) 20108 return true 20109 } 20110 } 20111 func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool { 20112 b := v.Block 20113 _ = b 20114 // match: (Rsh64Ux16 <t> x y) 20115 // cond: 20116 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 20117 for { 20118 t := v.Type 20119 x := v.Args[0] 20120 y := v.Args[1] 20121 v.reset(OpAMD64ANDQ) 20122 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 20123 v0.AddArg(x) 20124 v0.AddArg(y) 20125 v.AddArg(v0) 20126 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 20127 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 20128 v2.AuxInt = 64 20129 v2.AddArg(y) 20130 v1.AddArg(v2) 20131 v.AddArg(v1) 20132 return true 20133 } 20134 } 20135 func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool { 20136 b := v.Block 20137 _ = b 20138 // match: (Rsh64Ux32 <t> x y) 20139 // cond: 20140 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 20141 for { 20142 t := v.Type 20143 x := v.Args[0] 20144 y := v.Args[1] 20145 v.reset(OpAMD64ANDQ) 20146 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 20147 v0.AddArg(x) 20148 v0.AddArg(y) 20149 v.AddArg(v0) 20150 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 20151 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 20152 v2.AuxInt = 64 20153 v2.AddArg(y) 20154 v1.AddArg(v2) 20155 v.AddArg(v1) 20156 return true 20157 } 20158 } 20159 func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool { 20160 b := v.Block 20161 _ = b 20162 // match: (Rsh64Ux64 <t> x y) 20163 // cond: 20164 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 20165 for { 20166 t := v.Type 20167 x := v.Args[0] 20168 y := v.Args[1] 20169 v.reset(OpAMD64ANDQ) 20170 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 20171 v0.AddArg(x) 20172 v0.AddArg(y) 20173 v.AddArg(v0) 20174 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 20175 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 20176 v2.AuxInt = 64 20177 v2.AddArg(y) 20178 v1.AddArg(v2) 20179 v.AddArg(v1) 20180 return true 20181 } 20182 } 20183 func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool { 20184 b := v.Block 20185 _ = b 20186 // match: (Rsh64Ux8 <t> x y) 20187 // cond: 20188 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 20189 for { 20190 t := v.Type 20191 x := v.Args[0] 20192 y := v.Args[1] 20193 v.reset(OpAMD64ANDQ) 20194 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 20195 v0.AddArg(x) 20196 v0.AddArg(y) 20197 v.AddArg(v0) 20198 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 20199 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 20200 v2.AuxInt = 64 20201 v2.AddArg(y) 20202 v1.AddArg(v2) 20203 v.AddArg(v1) 20204 return true 20205 } 20206 } 20207 func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { 20208 b := v.Block 20209 _ = b 20210 // match: (Rsh64x16 <t> x y) 20211 // cond: 20212 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 20213 for { 20214 t := v.Type 20215 x := v.Args[0] 20216 y := v.Args[1] 20217 v.reset(OpAMD64SARQ) 20218 v.Type = t 20219 v.AddArg(x) 20220 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20221 v0.AddArg(y) 20222 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20223 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20224 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 20225 v3.AuxInt = 64 20226 v3.AddArg(y) 20227 v2.AddArg(v3) 20228 v1.AddArg(v2) 20229 v0.AddArg(v1) 20230 v.AddArg(v0) 20231 return true 20232 } 20233 } 20234 func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool { 20235 b := v.Block 20236 _ = b 20237 // match: (Rsh64x32 <t> x y) 20238 // cond: 20239 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 20240 for { 20241 t := v.Type 20242 x := v.Args[0] 20243 y := v.Args[1] 20244 v.reset(OpAMD64SARQ) 20245 v.Type = t 20246 v.AddArg(x) 20247 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20248 v0.AddArg(y) 20249 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20250 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20251 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 20252 v3.AuxInt = 64 20253 v3.AddArg(y) 20254 v2.AddArg(v3) 20255 v1.AddArg(v2) 20256 v0.AddArg(v1) 20257 v.AddArg(v0) 20258 return true 20259 } 20260 } 20261 func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool { 20262 b := v.Block 20263 _ = b 20264 // match: (Rsh64x64 <t> x y) 20265 // cond: 20266 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 20267 for { 20268 t := v.Type 20269 x := v.Args[0] 20270 y := v.Args[1] 20271 v.reset(OpAMD64SARQ) 20272 v.Type = t 20273 v.AddArg(x) 20274 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 20275 v0.AddArg(y) 20276 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 20277 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 20278 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 20279 v3.AuxInt = 64 20280 v3.AddArg(y) 20281 v2.AddArg(v3) 20282 v1.AddArg(v2) 20283 v0.AddArg(v1) 20284 v.AddArg(v0) 20285 return true 20286 } 20287 } 20288 func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { 20289 b := v.Block 20290 _ = b 20291 // match: (Rsh64x8 <t> x y) 20292 // cond: 20293 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 20294 for { 20295 t := v.Type 20296 x := v.Args[0] 20297 y := v.Args[1] 20298 v.reset(OpAMD64SARQ) 20299 v.Type = t 20300 v.AddArg(x) 20301 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20302 v0.AddArg(y) 20303 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20304 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20305 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 20306 v3.AuxInt = 64 20307 v3.AddArg(y) 20308 v2.AddArg(v3) 20309 v1.AddArg(v2) 20310 v0.AddArg(v1) 20311 v.AddArg(v0) 20312 return true 20313 } 20314 } 20315 func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool { 20316 b := v.Block 20317 _ = b 20318 // match: (Rsh8Ux16 <t> x y) 20319 // cond: 20320 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 20321 for { 20322 t := v.Type 20323 x := v.Args[0] 20324 y := v.Args[1] 20325 v.reset(OpAMD64ANDL) 20326 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 20327 v0.AddArg(x) 20328 v0.AddArg(y) 20329 v.AddArg(v0) 20330 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 20331 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 20332 v2.AuxInt = 8 20333 v2.AddArg(y) 20334 v1.AddArg(v2) 20335 v.AddArg(v1) 20336 return true 20337 } 20338 } 20339 func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool { 20340 b := v.Block 20341 _ = b 20342 // match: (Rsh8Ux32 <t> x y) 20343 // cond: 20344 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 20345 for { 20346 t := v.Type 20347 x := v.Args[0] 20348 y := v.Args[1] 20349 v.reset(OpAMD64ANDL) 20350 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 20351 v0.AddArg(x) 20352 v0.AddArg(y) 20353 v.AddArg(v0) 20354 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 20355 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 20356 v2.AuxInt = 8 20357 v2.AddArg(y) 20358 v1.AddArg(v2) 20359 v.AddArg(v1) 20360 return true 20361 } 20362 } 20363 func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool { 20364 b := v.Block 20365 _ = b 20366 // match: (Rsh8Ux64 <t> x y) 20367 // cond: 20368 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 20369 for { 20370 t := v.Type 20371 x := v.Args[0] 20372 y := v.Args[1] 20373 v.reset(OpAMD64ANDL) 20374 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 20375 v0.AddArg(x) 20376 v0.AddArg(y) 20377 v.AddArg(v0) 20378 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 20379 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 20380 v2.AuxInt = 8 20381 v2.AddArg(y) 20382 v1.AddArg(v2) 20383 v.AddArg(v1) 20384 return true 20385 } 20386 } 20387 func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool { 20388 b := v.Block 20389 _ = b 20390 // match: (Rsh8Ux8 <t> x y) 20391 // cond: 20392 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 20393 for { 20394 t := v.Type 20395 x := v.Args[0] 20396 y := v.Args[1] 20397 v.reset(OpAMD64ANDL) 20398 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 20399 v0.AddArg(x) 20400 v0.AddArg(y) 20401 v.AddArg(v0) 20402 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 20403 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 20404 v2.AuxInt = 8 20405 v2.AddArg(y) 20406 v1.AddArg(v2) 20407 v.AddArg(v1) 20408 return true 20409 } 20410 } 20411 func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { 20412 b := v.Block 20413 _ = b 20414 // match: (Rsh8x16 <t> x y) 20415 // cond: 20416 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 20417 for { 20418 t := v.Type 20419 x := v.Args[0] 20420 y := v.Args[1] 20421 v.reset(OpAMD64SARB) 20422 v.Type = t 20423 v.AddArg(x) 20424 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20425 v0.AddArg(y) 20426 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20427 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20428 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags) 20429 v3.AuxInt = 8 20430 v3.AddArg(y) 20431 v2.AddArg(v3) 20432 v1.AddArg(v2) 20433 v0.AddArg(v1) 20434 v.AddArg(v0) 20435 return true 20436 } 20437 } 20438 func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool { 20439 b := v.Block 20440 _ = b 20441 // match: (Rsh8x32 <t> x y) 20442 // cond: 20443 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 20444 for { 20445 t := v.Type 20446 x := v.Args[0] 20447 y := v.Args[1] 20448 v.reset(OpAMD64SARB) 20449 v.Type = t 20450 v.AddArg(x) 20451 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20452 v0.AddArg(y) 20453 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20454 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20455 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags) 20456 v3.AuxInt = 8 20457 v3.AddArg(y) 20458 v2.AddArg(v3) 20459 v1.AddArg(v2) 20460 v0.AddArg(v1) 20461 v.AddArg(v0) 20462 return true 20463 } 20464 } 20465 func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool { 20466 b := v.Block 20467 _ = b 20468 // match: (Rsh8x64 <t> x y) 20469 // cond: 20470 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 20471 for { 20472 t := v.Type 20473 x := v.Args[0] 20474 y := v.Args[1] 20475 v.reset(OpAMD64SARB) 20476 v.Type = t 20477 v.AddArg(x) 20478 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 20479 v0.AddArg(y) 20480 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 20481 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 20482 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags) 20483 v3.AuxInt = 8 20484 v3.AddArg(y) 20485 v2.AddArg(v3) 20486 v1.AddArg(v2) 20487 v0.AddArg(v1) 20488 v.AddArg(v0) 20489 return true 20490 } 20491 } 20492 func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool { 20493 b := v.Block 20494 _ = b 20495 // match: (Rsh8x8 <t> x y) 20496 // cond: 20497 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 20498 for { 20499 t := v.Type 20500 x := v.Args[0] 20501 y := v.Args[1] 20502 v.reset(OpAMD64SARB) 20503 v.Type = t 20504 v.AddArg(x) 20505 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 20506 v0.AddArg(y) 20507 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 20508 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 20509 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags) 20510 v3.AuxInt = 8 20511 v3.AddArg(y) 20512 v2.AddArg(v3) 20513 v1.AddArg(v2) 20514 v0.AddArg(v1) 20515 v.AddArg(v0) 20516 return true 20517 } 20518 } 20519 func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool { 20520 b := v.Block 20521 _ = b 20522 // match: (Select0 <t> (AddTupleFirst32 tuple val)) 20523 // cond: 20524 // result: (ADDL val (Select0 <t> tuple)) 20525 for { 20526 t := v.Type 20527 v_0 := v.Args[0] 20528 if v_0.Op != OpAMD64AddTupleFirst32 { 20529 break 20530 } 20531 tuple := v_0.Args[0] 20532 val := v_0.Args[1] 20533 v.reset(OpAMD64ADDL) 20534 v.AddArg(val) 20535 v0 := b.NewValue0(v.Pos, OpSelect0, t) 20536 v0.AddArg(tuple) 20537 v.AddArg(v0) 20538 return true 20539 } 20540 // match: (Select0 <t> (AddTupleFirst64 tuple val)) 20541 // cond: 20542 // result: (ADDQ val (Select0 <t> tuple)) 20543 for { 20544 t := v.Type 20545 v_0 := v.Args[0] 20546 if v_0.Op != OpAMD64AddTupleFirst64 { 20547 break 20548 } 20549 tuple := v_0.Args[0] 20550 val := v_0.Args[1] 20551 v.reset(OpAMD64ADDQ) 20552 v.AddArg(val) 20553 v0 := b.NewValue0(v.Pos, OpSelect0, t) 20554 v0.AddArg(tuple) 20555 v.AddArg(v0) 20556 return true 20557 } 20558 return false 20559 } 20560 func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool { 20561 b := v.Block 20562 _ = b 20563 // match: (Select1 (AddTupleFirst32 tuple _ )) 20564 // cond: 20565 // result: (Select1 tuple) 20566 for { 20567 v_0 := v.Args[0] 20568 if v_0.Op != OpAMD64AddTupleFirst32 { 20569 break 20570 } 20571 tuple := v_0.Args[0] 20572 v.reset(OpSelect1) 20573 v.AddArg(tuple) 20574 return true 20575 } 20576 // match: (Select1 (AddTupleFirst64 tuple _ )) 20577 // cond: 20578 // result: (Select1 tuple) 20579 for { 20580 v_0 := v.Args[0] 20581 if v_0.Op != OpAMD64AddTupleFirst64 { 20582 break 20583 } 20584 tuple := v_0.Args[0] 20585 v.reset(OpSelect1) 20586 v.AddArg(tuple) 20587 return true 20588 } 20589 return false 20590 } 20591 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool { 20592 b := v.Block 20593 _ = b 20594 // match: (SignExt16to32 x) 20595 // cond: 20596 // result: (MOVWQSX x) 20597 for { 20598 x := v.Args[0] 20599 v.reset(OpAMD64MOVWQSX) 20600 v.AddArg(x) 20601 return true 20602 } 20603 } 20604 func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool { 20605 b := v.Block 20606 _ = b 20607 // match: (SignExt16to64 x) 20608 // cond: 20609 // result: (MOVWQSX x) 20610 for { 20611 x := v.Args[0] 20612 v.reset(OpAMD64MOVWQSX) 20613 v.AddArg(x) 20614 return true 20615 } 20616 } 20617 func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool { 20618 b := v.Block 20619 _ = b 20620 // match: (SignExt32to64 x) 20621 // cond: 20622 // result: (MOVLQSX x) 20623 for { 20624 x := v.Args[0] 20625 v.reset(OpAMD64MOVLQSX) 20626 v.AddArg(x) 20627 return true 20628 } 20629 } 20630 func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool { 20631 b := v.Block 20632 _ = b 20633 // match: (SignExt8to16 x) 20634 // cond: 20635 // result: (MOVBQSX x) 20636 for { 20637 x := v.Args[0] 20638 v.reset(OpAMD64MOVBQSX) 20639 v.AddArg(x) 20640 return true 20641 } 20642 } 20643 func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool { 20644 b := v.Block 20645 _ = b 20646 // match: (SignExt8to32 x) 20647 // cond: 20648 // result: (MOVBQSX x) 20649 for { 20650 x := v.Args[0] 20651 v.reset(OpAMD64MOVBQSX) 20652 v.AddArg(x) 20653 return true 20654 } 20655 } 20656 func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool { 20657 b := v.Block 20658 _ = b 20659 // match: (SignExt8to64 x) 20660 // cond: 20661 // result: (MOVBQSX x) 20662 for { 20663 x := v.Args[0] 20664 v.reset(OpAMD64MOVBQSX) 20665 v.AddArg(x) 20666 return true 20667 } 20668 } 20669 func rewriteValueAMD64_OpSlicemask(v *Value, config *Config) bool { 20670 b := v.Block 20671 _ = b 20672 // match: (Slicemask <t> x) 20673 // cond: 20674 // result: (SARQconst (NEGQ <t> x) [63]) 20675 for { 20676 t := v.Type 20677 x := v.Args[0] 20678 v.reset(OpAMD64SARQconst) 20679 v.AuxInt = 63 20680 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 20681 v0.AddArg(x) 20682 v.AddArg(v0) 20683 return true 20684 } 20685 } 20686 func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool { 20687 b := v.Block 20688 _ = b 20689 // match: (Sqrt x) 20690 // cond: 20691 // result: (SQRTSD x) 20692 for { 20693 x := v.Args[0] 20694 v.reset(OpAMD64SQRTSD) 20695 v.AddArg(x) 20696 return true 20697 } 20698 } 20699 func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool { 20700 b := v.Block 20701 _ = b 20702 // match: (StaticCall [argwid] {target} mem) 20703 // cond: 20704 // result: (CALLstatic [argwid] {target} mem) 20705 for { 20706 argwid := v.AuxInt 20707 target := v.Aux 20708 mem := v.Args[0] 20709 v.reset(OpAMD64CALLstatic) 20710 v.AuxInt = argwid 20711 v.Aux = target 20712 v.AddArg(mem) 20713 return true 20714 } 20715 } 20716 func rewriteValueAMD64_OpStore(v *Value, config *Config) bool { 20717 b := v.Block 20718 _ = b 20719 // match: (Store [8] ptr val mem) 20720 // cond: is64BitFloat(val.Type) 20721 // result: (MOVSDstore ptr val mem) 20722 for { 20723 if v.AuxInt != 8 { 20724 break 20725 } 20726 ptr := v.Args[0] 20727 val := v.Args[1] 20728 mem := v.Args[2] 20729 if !(is64BitFloat(val.Type)) { 20730 break 20731 } 20732 v.reset(OpAMD64MOVSDstore) 20733 v.AddArg(ptr) 20734 v.AddArg(val) 20735 v.AddArg(mem) 20736 return true 20737 } 20738 // match: (Store [4] ptr val mem) 20739 // cond: is32BitFloat(val.Type) 20740 // result: (MOVSSstore ptr val mem) 20741 for { 20742 if v.AuxInt != 4 { 20743 break 20744 } 20745 ptr := v.Args[0] 20746 val := v.Args[1] 20747 mem := v.Args[2] 20748 if !(is32BitFloat(val.Type)) { 20749 break 20750 } 20751 v.reset(OpAMD64MOVSSstore) 20752 v.AddArg(ptr) 20753 v.AddArg(val) 20754 v.AddArg(mem) 20755 return true 20756 } 20757 // match: (Store [8] ptr val mem) 20758 // cond: 20759 // result: (MOVQstore ptr val mem) 20760 for { 20761 if v.AuxInt != 8 { 20762 break 20763 } 20764 ptr := v.Args[0] 20765 val := v.Args[1] 20766 mem := v.Args[2] 20767 v.reset(OpAMD64MOVQstore) 20768 v.AddArg(ptr) 20769 v.AddArg(val) 20770 v.AddArg(mem) 20771 return true 20772 } 20773 // match: (Store [4] ptr val mem) 20774 // cond: 20775 // result: (MOVLstore ptr val mem) 20776 for { 20777 if v.AuxInt != 4 { 20778 break 20779 } 20780 ptr := v.Args[0] 20781 val := v.Args[1] 20782 mem := v.Args[2] 20783 v.reset(OpAMD64MOVLstore) 20784 v.AddArg(ptr) 20785 v.AddArg(val) 20786 v.AddArg(mem) 20787 return true 20788 } 20789 // match: (Store [2] ptr val mem) 20790 // cond: 20791 // result: (MOVWstore ptr val mem) 20792 for { 20793 if v.AuxInt != 2 { 20794 break 20795 } 20796 ptr := v.Args[0] 20797 val := v.Args[1] 20798 mem := v.Args[2] 20799 v.reset(OpAMD64MOVWstore) 20800 v.AddArg(ptr) 20801 v.AddArg(val) 20802 v.AddArg(mem) 20803 return true 20804 } 20805 // match: (Store [1] ptr val mem) 20806 // cond: 20807 // result: (MOVBstore ptr val mem) 20808 for { 20809 if v.AuxInt != 1 { 20810 break 20811 } 20812 ptr := v.Args[0] 20813 val := v.Args[1] 20814 mem := v.Args[2] 20815 v.reset(OpAMD64MOVBstore) 20816 v.AddArg(ptr) 20817 v.AddArg(val) 20818 v.AddArg(mem) 20819 return true 20820 } 20821 return false 20822 } 20823 func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool { 20824 b := v.Block 20825 _ = b 20826 // match: (Sub16 x y) 20827 // cond: 20828 // result: (SUBL x y) 20829 for { 20830 x := v.Args[0] 20831 y := v.Args[1] 20832 v.reset(OpAMD64SUBL) 20833 v.AddArg(x) 20834 v.AddArg(y) 20835 return true 20836 } 20837 } 20838 func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool { 20839 b := v.Block 20840 _ = b 20841 // match: (Sub32 x y) 20842 // cond: 20843 // result: (SUBL x y) 20844 for { 20845 x := v.Args[0] 20846 y := v.Args[1] 20847 v.reset(OpAMD64SUBL) 20848 v.AddArg(x) 20849 v.AddArg(y) 20850 return true 20851 } 20852 } 20853 func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool { 20854 b := v.Block 20855 _ = b 20856 // match: (Sub32F x y) 20857 // cond: 20858 // result: (SUBSS x y) 20859 for { 20860 x := v.Args[0] 20861 y := v.Args[1] 20862 v.reset(OpAMD64SUBSS) 20863 v.AddArg(x) 20864 v.AddArg(y) 20865 return true 20866 } 20867 } 20868 func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool { 20869 b := v.Block 20870 _ = b 20871 // match: (Sub64 x y) 20872 // cond: 20873 // result: (SUBQ x y) 20874 for { 20875 x := v.Args[0] 20876 y := v.Args[1] 20877 v.reset(OpAMD64SUBQ) 20878 v.AddArg(x) 20879 v.AddArg(y) 20880 return true 20881 } 20882 } 20883 func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool { 20884 b := v.Block 20885 _ = b 20886 // match: (Sub64F x y) 20887 // cond: 20888 // result: (SUBSD x y) 20889 for { 20890 x := v.Args[0] 20891 y := v.Args[1] 20892 v.reset(OpAMD64SUBSD) 20893 v.AddArg(x) 20894 v.AddArg(y) 20895 return true 20896 } 20897 } 20898 func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool { 20899 b := v.Block 20900 _ = b 20901 // match: (Sub8 x y) 20902 // cond: 20903 // result: (SUBL x y) 20904 for { 20905 x := v.Args[0] 20906 y := v.Args[1] 20907 v.reset(OpAMD64SUBL) 20908 v.AddArg(x) 20909 v.AddArg(y) 20910 return true 20911 } 20912 } 20913 func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool { 20914 b := v.Block 20915 _ = b 20916 // match: (SubPtr x y) 20917 // cond: config.PtrSize == 8 20918 // result: (SUBQ x y) 20919 for { 20920 x := v.Args[0] 20921 y := v.Args[1] 20922 if !(config.PtrSize == 8) { 20923 break 20924 } 20925 v.reset(OpAMD64SUBQ) 20926 v.AddArg(x) 20927 v.AddArg(y) 20928 return true 20929 } 20930 // match: (SubPtr x y) 20931 // cond: config.PtrSize == 4 20932 // result: (SUBL x y) 20933 for { 20934 x := v.Args[0] 20935 y := v.Args[1] 20936 if !(config.PtrSize == 4) { 20937 break 20938 } 20939 v.reset(OpAMD64SUBL) 20940 v.AddArg(x) 20941 v.AddArg(y) 20942 return true 20943 } 20944 return false 20945 } 20946 func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool { 20947 b := v.Block 20948 _ = b 20949 // match: (Trunc16to8 x) 20950 // cond: 20951 // result: x 20952 for { 20953 x := v.Args[0] 20954 v.reset(OpCopy) 20955 v.Type = x.Type 20956 v.AddArg(x) 20957 return true 20958 } 20959 } 20960 func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool { 20961 b := v.Block 20962 _ = b 20963 // match: (Trunc32to16 x) 20964 // cond: 20965 // result: x 20966 for { 20967 x := v.Args[0] 20968 v.reset(OpCopy) 20969 v.Type = x.Type 20970 v.AddArg(x) 20971 return true 20972 } 20973 } 20974 func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool { 20975 b := v.Block 20976 _ = b 20977 // match: (Trunc32to8 x) 20978 // cond: 20979 // result: x 20980 for { 20981 x := v.Args[0] 20982 v.reset(OpCopy) 20983 v.Type = x.Type 20984 v.AddArg(x) 20985 return true 20986 } 20987 } 20988 func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool { 20989 b := v.Block 20990 _ = b 20991 // match: (Trunc64to16 x) 20992 // cond: 20993 // result: x 20994 for { 20995 x := v.Args[0] 20996 v.reset(OpCopy) 20997 v.Type = x.Type 20998 v.AddArg(x) 20999 return true 21000 } 21001 } 21002 func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool { 21003 b := v.Block 21004 _ = b 21005 // match: (Trunc64to32 x) 21006 // cond: 21007 // result: x 21008 for { 21009 x := v.Args[0] 21010 v.reset(OpCopy) 21011 v.Type = x.Type 21012 v.AddArg(x) 21013 return true 21014 } 21015 } 21016 func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool { 21017 b := v.Block 21018 _ = b 21019 // match: (Trunc64to8 x) 21020 // cond: 21021 // result: x 21022 for { 21023 x := v.Args[0] 21024 v.reset(OpCopy) 21025 v.Type = x.Type 21026 v.AddArg(x) 21027 return true 21028 } 21029 } 21030 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool { 21031 b := v.Block 21032 _ = b 21033 // match: (Xor16 x y) 21034 // cond: 21035 // result: (XORL x y) 21036 for { 21037 x := v.Args[0] 21038 y := v.Args[1] 21039 v.reset(OpAMD64XORL) 21040 v.AddArg(x) 21041 v.AddArg(y) 21042 return true 21043 } 21044 } 21045 func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool { 21046 b := v.Block 21047 _ = b 21048 // match: (Xor32 x y) 21049 // cond: 21050 // result: (XORL x y) 21051 for { 21052 x := v.Args[0] 21053 y := v.Args[1] 21054 v.reset(OpAMD64XORL) 21055 v.AddArg(x) 21056 v.AddArg(y) 21057 return true 21058 } 21059 } 21060 func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool { 21061 b := v.Block 21062 _ = b 21063 // match: (Xor64 x y) 21064 // cond: 21065 // result: (XORQ x y) 21066 for { 21067 x := v.Args[0] 21068 y := v.Args[1] 21069 v.reset(OpAMD64XORQ) 21070 v.AddArg(x) 21071 v.AddArg(y) 21072 return true 21073 } 21074 } 21075 func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool { 21076 b := v.Block 21077 _ = b 21078 // match: (Xor8 x y) 21079 // cond: 21080 // result: (XORL x y) 21081 for { 21082 x := v.Args[0] 21083 y := v.Args[1] 21084 v.reset(OpAMD64XORL) 21085 v.AddArg(x) 21086 v.AddArg(y) 21087 return true 21088 } 21089 } 21090 func rewriteValueAMD64_OpZero(v *Value, config *Config) bool { 21091 b := v.Block 21092 _ = b 21093 // match: (Zero [s] _ mem) 21094 // cond: SizeAndAlign(s).Size() == 0 21095 // result: mem 21096 for { 21097 s := v.AuxInt 21098 mem := v.Args[1] 21099 if !(SizeAndAlign(s).Size() == 0) { 21100 break 21101 } 21102 v.reset(OpCopy) 21103 v.Type = mem.Type 21104 v.AddArg(mem) 21105 return true 21106 } 21107 // match: (Zero [s] destptr mem) 21108 // cond: SizeAndAlign(s).Size() == 1 21109 // result: (MOVBstoreconst [0] destptr mem) 21110 for { 21111 s := v.AuxInt 21112 destptr := v.Args[0] 21113 mem := v.Args[1] 21114 if !(SizeAndAlign(s).Size() == 1) { 21115 break 21116 } 21117 v.reset(OpAMD64MOVBstoreconst) 21118 v.AuxInt = 0 21119 v.AddArg(destptr) 21120 v.AddArg(mem) 21121 return true 21122 } 21123 // match: (Zero [s] destptr mem) 21124 // cond: SizeAndAlign(s).Size() == 2 21125 // result: (MOVWstoreconst [0] destptr mem) 21126 for { 21127 s := v.AuxInt 21128 destptr := v.Args[0] 21129 mem := v.Args[1] 21130 if !(SizeAndAlign(s).Size() == 2) { 21131 break 21132 } 21133 v.reset(OpAMD64MOVWstoreconst) 21134 v.AuxInt = 0 21135 v.AddArg(destptr) 21136 v.AddArg(mem) 21137 return true 21138 } 21139 // match: (Zero [s] destptr mem) 21140 // cond: SizeAndAlign(s).Size() == 4 21141 // result: (MOVLstoreconst [0] destptr mem) 21142 for { 21143 s := v.AuxInt 21144 destptr := v.Args[0] 21145 mem := v.Args[1] 21146 if !(SizeAndAlign(s).Size() == 4) { 21147 break 21148 } 21149 v.reset(OpAMD64MOVLstoreconst) 21150 v.AuxInt = 0 21151 v.AddArg(destptr) 21152 v.AddArg(mem) 21153 return true 21154 } 21155 // match: (Zero [s] destptr mem) 21156 // cond: SizeAndAlign(s).Size() == 8 21157 // result: (MOVQstoreconst [0] destptr mem) 21158 for { 21159 s := v.AuxInt 21160 destptr := v.Args[0] 21161 mem := v.Args[1] 21162 if !(SizeAndAlign(s).Size() == 8) { 21163 break 21164 } 21165 v.reset(OpAMD64MOVQstoreconst) 21166 v.AuxInt = 0 21167 v.AddArg(destptr) 21168 v.AddArg(mem) 21169 return true 21170 } 21171 // match: (Zero [s] destptr mem) 21172 // cond: SizeAndAlign(s).Size() == 3 21173 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 21174 for { 21175 s := v.AuxInt 21176 destptr := v.Args[0] 21177 mem := v.Args[1] 21178 if !(SizeAndAlign(s).Size() == 3) { 21179 break 21180 } 21181 v.reset(OpAMD64MOVBstoreconst) 21182 v.AuxInt = makeValAndOff(0, 2) 21183 v.AddArg(destptr) 21184 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, TypeMem) 21185 v0.AuxInt = 0 21186 v0.AddArg(destptr) 21187 v0.AddArg(mem) 21188 v.AddArg(v0) 21189 return true 21190 } 21191 // match: (Zero [s] destptr mem) 21192 // cond: SizeAndAlign(s).Size() == 5 21193 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 21194 for { 21195 s := v.AuxInt 21196 destptr := v.Args[0] 21197 mem := v.Args[1] 21198 if !(SizeAndAlign(s).Size() == 5) { 21199 break 21200 } 21201 v.reset(OpAMD64MOVBstoreconst) 21202 v.AuxInt = makeValAndOff(0, 4) 21203 v.AddArg(destptr) 21204 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 21205 v0.AuxInt = 0 21206 v0.AddArg(destptr) 21207 v0.AddArg(mem) 21208 v.AddArg(v0) 21209 return true 21210 } 21211 // match: (Zero [s] destptr mem) 21212 // cond: SizeAndAlign(s).Size() == 6 21213 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 21214 for { 21215 s := v.AuxInt 21216 destptr := v.Args[0] 21217 mem := v.Args[1] 21218 if !(SizeAndAlign(s).Size() == 6) { 21219 break 21220 } 21221 v.reset(OpAMD64MOVWstoreconst) 21222 v.AuxInt = makeValAndOff(0, 4) 21223 v.AddArg(destptr) 21224 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 21225 v0.AuxInt = 0 21226 v0.AddArg(destptr) 21227 v0.AddArg(mem) 21228 v.AddArg(v0) 21229 return true 21230 } 21231 // match: (Zero [s] destptr mem) 21232 // cond: SizeAndAlign(s).Size() == 7 21233 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 21234 for { 21235 s := v.AuxInt 21236 destptr := v.Args[0] 21237 mem := v.Args[1] 21238 if !(SizeAndAlign(s).Size() == 7) { 21239 break 21240 } 21241 v.reset(OpAMD64MOVLstoreconst) 21242 v.AuxInt = makeValAndOff(0, 3) 21243 v.AddArg(destptr) 21244 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem) 21245 v0.AuxInt = 0 21246 v0.AddArg(destptr) 21247 v0.AddArg(mem) 21248 v.AddArg(v0) 21249 return true 21250 } 21251 // match: (Zero [s] destptr mem) 21252 // cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 21253 // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem)) 21254 for { 21255 s := v.AuxInt 21256 destptr := v.Args[0] 21257 mem := v.Args[1] 21258 if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) { 21259 break 21260 } 21261 v.reset(OpZero) 21262 v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8 21263 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 21264 v0.AuxInt = SizeAndAlign(s).Size() % 8 21265 v0.AddArg(destptr) 21266 v.AddArg(v0) 21267 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 21268 v1.AuxInt = 0 21269 v1.AddArg(destptr) 21270 v1.AddArg(mem) 21271 v.AddArg(v1) 21272 return true 21273 } 21274 // match: (Zero [s] destptr mem) 21275 // cond: SizeAndAlign(s).Size() == 16 21276 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 21277 for { 21278 s := v.AuxInt 21279 destptr := v.Args[0] 21280 mem := v.Args[1] 21281 if !(SizeAndAlign(s).Size() == 16) { 21282 break 21283 } 21284 v.reset(OpAMD64MOVQstoreconst) 21285 v.AuxInt = makeValAndOff(0, 8) 21286 v.AddArg(destptr) 21287 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 21288 v0.AuxInt = 0 21289 v0.AddArg(destptr) 21290 v0.AddArg(mem) 21291 v.AddArg(v0) 21292 return true 21293 } 21294 // match: (Zero [s] destptr mem) 21295 // cond: SizeAndAlign(s).Size() == 24 21296 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 21297 for { 21298 s := v.AuxInt 21299 destptr := v.Args[0] 21300 mem := v.Args[1] 21301 if !(SizeAndAlign(s).Size() == 24) { 21302 break 21303 } 21304 v.reset(OpAMD64MOVQstoreconst) 21305 v.AuxInt = makeValAndOff(0, 16) 21306 v.AddArg(destptr) 21307 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 21308 v0.AuxInt = makeValAndOff(0, 8) 21309 v0.AddArg(destptr) 21310 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 21311 v1.AuxInt = 0 21312 v1.AddArg(destptr) 21313 v1.AddArg(mem) 21314 v0.AddArg(v1) 21315 v.AddArg(v0) 21316 return true 21317 } 21318 // match: (Zero [s] destptr mem) 21319 // cond: SizeAndAlign(s).Size() == 32 21320 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 21321 for { 21322 s := v.AuxInt 21323 destptr := v.Args[0] 21324 mem := v.Args[1] 21325 if !(SizeAndAlign(s).Size() == 32) { 21326 break 21327 } 21328 v.reset(OpAMD64MOVQstoreconst) 21329 v.AuxInt = makeValAndOff(0, 24) 21330 v.AddArg(destptr) 21331 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 21332 v0.AuxInt = makeValAndOff(0, 16) 21333 v0.AddArg(destptr) 21334 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 21335 v1.AuxInt = makeValAndOff(0, 8) 21336 v1.AddArg(destptr) 21337 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem) 21338 v2.AuxInt = 0 21339 v2.AddArg(destptr) 21340 v2.AddArg(mem) 21341 v1.AddArg(v2) 21342 v0.AddArg(v1) 21343 v.AddArg(v0) 21344 return true 21345 } 21346 // match: (Zero [s] destptr mem) 21347 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice 21348 // result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) 21349 for { 21350 s := v.AuxInt 21351 destptr := v.Args[0] 21352 mem := v.Args[1] 21353 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) { 21354 break 21355 } 21356 v.reset(OpZero) 21357 v.AuxInt = SizeAndAlign(s).Size() - 8 21358 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 21359 v0.AuxInt = 8 21360 v0.AddArg(destptr) 21361 v.AddArg(v0) 21362 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem) 21363 v1.AddArg(destptr) 21364 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 21365 v2.AuxInt = 0 21366 v1.AddArg(v2) 21367 v1.AddArg(mem) 21368 v.AddArg(v1) 21369 return true 21370 } 21371 // match: (Zero [s] destptr mem) 21372 // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice 21373 // result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem) 21374 for { 21375 s := v.AuxInt 21376 destptr := v.Args[0] 21377 mem := v.Args[1] 21378 if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) { 21379 break 21380 } 21381 v.reset(OpAMD64DUFFZERO) 21382 v.AuxInt = SizeAndAlign(s).Size() 21383 v.AddArg(destptr) 21384 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, TypeInt128) 21385 v0.AuxInt = 0 21386 v.AddArg(v0) 21387 v.AddArg(mem) 21388 return true 21389 } 21390 // match: (Zero [s] destptr mem) 21391 // cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0 21392 // result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem) 21393 for { 21394 s := v.AuxInt 21395 destptr := v.Args[0] 21396 mem := v.Args[1] 21397 if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) { 21398 break 21399 } 21400 v.reset(OpAMD64REPSTOSQ) 21401 v.AddArg(destptr) 21402 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 21403 v0.AuxInt = SizeAndAlign(s).Size() / 8 21404 v.AddArg(v0) 21405 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64()) 21406 v1.AuxInt = 0 21407 v.AddArg(v1) 21408 v.AddArg(mem) 21409 return true 21410 } 21411 return false 21412 } 21413 func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool { 21414 b := v.Block 21415 _ = b 21416 // match: (ZeroExt16to32 x) 21417 // cond: 21418 // result: (MOVWQZX x) 21419 for { 21420 x := v.Args[0] 21421 v.reset(OpAMD64MOVWQZX) 21422 v.AddArg(x) 21423 return true 21424 } 21425 } 21426 func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool { 21427 b := v.Block 21428 _ = b 21429 // match: (ZeroExt16to64 x) 21430 // cond: 21431 // result: (MOVWQZX x) 21432 for { 21433 x := v.Args[0] 21434 v.reset(OpAMD64MOVWQZX) 21435 v.AddArg(x) 21436 return true 21437 } 21438 } 21439 func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool { 21440 b := v.Block 21441 _ = b 21442 // match: (ZeroExt32to64 x) 21443 // cond: 21444 // result: (MOVLQZX x) 21445 for { 21446 x := v.Args[0] 21447 v.reset(OpAMD64MOVLQZX) 21448 v.AddArg(x) 21449 return true 21450 } 21451 } 21452 func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool { 21453 b := v.Block 21454 _ = b 21455 // match: (ZeroExt8to16 x) 21456 // cond: 21457 // result: (MOVBQZX x) 21458 for { 21459 x := v.Args[0] 21460 v.reset(OpAMD64MOVBQZX) 21461 v.AddArg(x) 21462 return true 21463 } 21464 } 21465 func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool { 21466 b := v.Block 21467 _ = b 21468 // match: (ZeroExt8to32 x) 21469 // cond: 21470 // result: (MOVBQZX x) 21471 for { 21472 x := v.Args[0] 21473 v.reset(OpAMD64MOVBQZX) 21474 v.AddArg(x) 21475 return true 21476 } 21477 } 21478 func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool { 21479 b := v.Block 21480 _ = b 21481 // match: (ZeroExt8to64 x) 21482 // cond: 21483 // result: (MOVBQZX x) 21484 for { 21485 x := v.Args[0] 21486 v.reset(OpAMD64MOVBQZX) 21487 v.AddArg(x) 21488 return true 21489 } 21490 } 21491 func rewriteBlockAMD64(b *Block, config *Config) bool { 21492 switch b.Kind { 21493 case BlockAMD64EQ: 21494 // match: (EQ (InvertFlags cmp) yes no) 21495 // cond: 21496 // result: (EQ cmp yes no) 21497 for { 21498 v := b.Control 21499 if v.Op != OpAMD64InvertFlags { 21500 break 21501 } 21502 cmp := v.Args[0] 21503 yes := b.Succs[0] 21504 no := b.Succs[1] 21505 b.Kind = BlockAMD64EQ 21506 b.SetControl(cmp) 21507 _ = yes 21508 _ = no 21509 return true 21510 } 21511 // match: (EQ (FlagEQ) yes no) 21512 // cond: 21513 // result: (First nil yes no) 21514 for { 21515 v := b.Control 21516 if v.Op != OpAMD64FlagEQ { 21517 break 21518 } 21519 yes := b.Succs[0] 21520 no := b.Succs[1] 21521 b.Kind = BlockFirst 21522 b.SetControl(nil) 21523 _ = yes 21524 _ = no 21525 return true 21526 } 21527 // match: (EQ (FlagLT_ULT) yes no) 21528 // cond: 21529 // result: (First nil no yes) 21530 for { 21531 v := b.Control 21532 if v.Op != OpAMD64FlagLT_ULT { 21533 break 21534 } 21535 yes := b.Succs[0] 21536 no := b.Succs[1] 21537 b.Kind = BlockFirst 21538 b.SetControl(nil) 21539 b.swapSuccessors() 21540 _ = no 21541 _ = yes 21542 return true 21543 } 21544 // match: (EQ (FlagLT_UGT) yes no) 21545 // cond: 21546 // result: (First nil no yes) 21547 for { 21548 v := b.Control 21549 if v.Op != OpAMD64FlagLT_UGT { 21550 break 21551 } 21552 yes := b.Succs[0] 21553 no := b.Succs[1] 21554 b.Kind = BlockFirst 21555 b.SetControl(nil) 21556 b.swapSuccessors() 21557 _ = no 21558 _ = yes 21559 return true 21560 } 21561 // match: (EQ (FlagGT_ULT) yes no) 21562 // cond: 21563 // result: (First nil no yes) 21564 for { 21565 v := b.Control 21566 if v.Op != OpAMD64FlagGT_ULT { 21567 break 21568 } 21569 yes := b.Succs[0] 21570 no := b.Succs[1] 21571 b.Kind = BlockFirst 21572 b.SetControl(nil) 21573 b.swapSuccessors() 21574 _ = no 21575 _ = yes 21576 return true 21577 } 21578 // match: (EQ (FlagGT_UGT) yes no) 21579 // cond: 21580 // result: (First nil no yes) 21581 for { 21582 v := b.Control 21583 if v.Op != OpAMD64FlagGT_UGT { 21584 break 21585 } 21586 yes := b.Succs[0] 21587 no := b.Succs[1] 21588 b.Kind = BlockFirst 21589 b.SetControl(nil) 21590 b.swapSuccessors() 21591 _ = no 21592 _ = yes 21593 return true 21594 } 21595 case BlockAMD64GE: 21596 // match: (GE (InvertFlags cmp) yes no) 21597 // cond: 21598 // result: (LE cmp yes no) 21599 for { 21600 v := b.Control 21601 if v.Op != OpAMD64InvertFlags { 21602 break 21603 } 21604 cmp := v.Args[0] 21605 yes := b.Succs[0] 21606 no := b.Succs[1] 21607 b.Kind = BlockAMD64LE 21608 b.SetControl(cmp) 21609 _ = yes 21610 _ = no 21611 return true 21612 } 21613 // match: (GE (FlagEQ) yes no) 21614 // cond: 21615 // result: (First nil yes no) 21616 for { 21617 v := b.Control 21618 if v.Op != OpAMD64FlagEQ { 21619 break 21620 } 21621 yes := b.Succs[0] 21622 no := b.Succs[1] 21623 b.Kind = BlockFirst 21624 b.SetControl(nil) 21625 _ = yes 21626 _ = no 21627 return true 21628 } 21629 // match: (GE (FlagLT_ULT) yes no) 21630 // cond: 21631 // result: (First nil no yes) 21632 for { 21633 v := b.Control 21634 if v.Op != OpAMD64FlagLT_ULT { 21635 break 21636 } 21637 yes := b.Succs[0] 21638 no := b.Succs[1] 21639 b.Kind = BlockFirst 21640 b.SetControl(nil) 21641 b.swapSuccessors() 21642 _ = no 21643 _ = yes 21644 return true 21645 } 21646 // match: (GE (FlagLT_UGT) yes no) 21647 // cond: 21648 // result: (First nil no yes) 21649 for { 21650 v := b.Control 21651 if v.Op != OpAMD64FlagLT_UGT { 21652 break 21653 } 21654 yes := b.Succs[0] 21655 no := b.Succs[1] 21656 b.Kind = BlockFirst 21657 b.SetControl(nil) 21658 b.swapSuccessors() 21659 _ = no 21660 _ = yes 21661 return true 21662 } 21663 // match: (GE (FlagGT_ULT) yes no) 21664 // cond: 21665 // result: (First nil yes no) 21666 for { 21667 v := b.Control 21668 if v.Op != OpAMD64FlagGT_ULT { 21669 break 21670 } 21671 yes := b.Succs[0] 21672 no := b.Succs[1] 21673 b.Kind = BlockFirst 21674 b.SetControl(nil) 21675 _ = yes 21676 _ = no 21677 return true 21678 } 21679 // match: (GE (FlagGT_UGT) yes no) 21680 // cond: 21681 // result: (First nil yes no) 21682 for { 21683 v := b.Control 21684 if v.Op != OpAMD64FlagGT_UGT { 21685 break 21686 } 21687 yes := b.Succs[0] 21688 no := b.Succs[1] 21689 b.Kind = BlockFirst 21690 b.SetControl(nil) 21691 _ = yes 21692 _ = no 21693 return true 21694 } 21695 case BlockAMD64GT: 21696 // match: (GT (InvertFlags cmp) yes no) 21697 // cond: 21698 // result: (LT cmp yes no) 21699 for { 21700 v := b.Control 21701 if v.Op != OpAMD64InvertFlags { 21702 break 21703 } 21704 cmp := v.Args[0] 21705 yes := b.Succs[0] 21706 no := b.Succs[1] 21707 b.Kind = BlockAMD64LT 21708 b.SetControl(cmp) 21709 _ = yes 21710 _ = no 21711 return true 21712 } 21713 // match: (GT (FlagEQ) yes no) 21714 // cond: 21715 // result: (First nil no yes) 21716 for { 21717 v := b.Control 21718 if v.Op != OpAMD64FlagEQ { 21719 break 21720 } 21721 yes := b.Succs[0] 21722 no := b.Succs[1] 21723 b.Kind = BlockFirst 21724 b.SetControl(nil) 21725 b.swapSuccessors() 21726 _ = no 21727 _ = yes 21728 return true 21729 } 21730 // match: (GT (FlagLT_ULT) yes no) 21731 // cond: 21732 // result: (First nil no yes) 21733 for { 21734 v := b.Control 21735 if v.Op != OpAMD64FlagLT_ULT { 21736 break 21737 } 21738 yes := b.Succs[0] 21739 no := b.Succs[1] 21740 b.Kind = BlockFirst 21741 b.SetControl(nil) 21742 b.swapSuccessors() 21743 _ = no 21744 _ = yes 21745 return true 21746 } 21747 // match: (GT (FlagLT_UGT) yes no) 21748 // cond: 21749 // result: (First nil no yes) 21750 for { 21751 v := b.Control 21752 if v.Op != OpAMD64FlagLT_UGT { 21753 break 21754 } 21755 yes := b.Succs[0] 21756 no := b.Succs[1] 21757 b.Kind = BlockFirst 21758 b.SetControl(nil) 21759 b.swapSuccessors() 21760 _ = no 21761 _ = yes 21762 return true 21763 } 21764 // match: (GT (FlagGT_ULT) yes no) 21765 // cond: 21766 // result: (First nil yes no) 21767 for { 21768 v := b.Control 21769 if v.Op != OpAMD64FlagGT_ULT { 21770 break 21771 } 21772 yes := b.Succs[0] 21773 no := b.Succs[1] 21774 b.Kind = BlockFirst 21775 b.SetControl(nil) 21776 _ = yes 21777 _ = no 21778 return true 21779 } 21780 // match: (GT (FlagGT_UGT) yes no) 21781 // cond: 21782 // result: (First nil yes no) 21783 for { 21784 v := b.Control 21785 if v.Op != OpAMD64FlagGT_UGT { 21786 break 21787 } 21788 yes := b.Succs[0] 21789 no := b.Succs[1] 21790 b.Kind = BlockFirst 21791 b.SetControl(nil) 21792 _ = yes 21793 _ = no 21794 return true 21795 } 21796 case BlockIf: 21797 // match: (If (SETL cmp) yes no) 21798 // cond: 21799 // result: (LT cmp yes no) 21800 for { 21801 v := b.Control 21802 if v.Op != OpAMD64SETL { 21803 break 21804 } 21805 cmp := v.Args[0] 21806 yes := b.Succs[0] 21807 no := b.Succs[1] 21808 b.Kind = BlockAMD64LT 21809 b.SetControl(cmp) 21810 _ = yes 21811 _ = no 21812 return true 21813 } 21814 // match: (If (SETLE cmp) yes no) 21815 // cond: 21816 // result: (LE cmp yes no) 21817 for { 21818 v := b.Control 21819 if v.Op != OpAMD64SETLE { 21820 break 21821 } 21822 cmp := v.Args[0] 21823 yes := b.Succs[0] 21824 no := b.Succs[1] 21825 b.Kind = BlockAMD64LE 21826 b.SetControl(cmp) 21827 _ = yes 21828 _ = no 21829 return true 21830 } 21831 // match: (If (SETG cmp) yes no) 21832 // cond: 21833 // result: (GT cmp yes no) 21834 for { 21835 v := b.Control 21836 if v.Op != OpAMD64SETG { 21837 break 21838 } 21839 cmp := v.Args[0] 21840 yes := b.Succs[0] 21841 no := b.Succs[1] 21842 b.Kind = BlockAMD64GT 21843 b.SetControl(cmp) 21844 _ = yes 21845 _ = no 21846 return true 21847 } 21848 // match: (If (SETGE cmp) yes no) 21849 // cond: 21850 // result: (GE cmp yes no) 21851 for { 21852 v := b.Control 21853 if v.Op != OpAMD64SETGE { 21854 break 21855 } 21856 cmp := v.Args[0] 21857 yes := b.Succs[0] 21858 no := b.Succs[1] 21859 b.Kind = BlockAMD64GE 21860 b.SetControl(cmp) 21861 _ = yes 21862 _ = no 21863 return true 21864 } 21865 // match: (If (SETEQ cmp) yes no) 21866 // cond: 21867 // result: (EQ cmp yes no) 21868 for { 21869 v := b.Control 21870 if v.Op != OpAMD64SETEQ { 21871 break 21872 } 21873 cmp := v.Args[0] 21874 yes := b.Succs[0] 21875 no := b.Succs[1] 21876 b.Kind = BlockAMD64EQ 21877 b.SetControl(cmp) 21878 _ = yes 21879 _ = no 21880 return true 21881 } 21882 // match: (If (SETNE cmp) yes no) 21883 // cond: 21884 // result: (NE cmp yes no) 21885 for { 21886 v := b.Control 21887 if v.Op != OpAMD64SETNE { 21888 break 21889 } 21890 cmp := v.Args[0] 21891 yes := b.Succs[0] 21892 no := b.Succs[1] 21893 b.Kind = BlockAMD64NE 21894 b.SetControl(cmp) 21895 _ = yes 21896 _ = no 21897 return true 21898 } 21899 // match: (If (SETB cmp) yes no) 21900 // cond: 21901 // result: (ULT cmp yes no) 21902 for { 21903 v := b.Control 21904 if v.Op != OpAMD64SETB { 21905 break 21906 } 21907 cmp := v.Args[0] 21908 yes := b.Succs[0] 21909 no := b.Succs[1] 21910 b.Kind = BlockAMD64ULT 21911 b.SetControl(cmp) 21912 _ = yes 21913 _ = no 21914 return true 21915 } 21916 // match: (If (SETBE cmp) yes no) 21917 // cond: 21918 // result: (ULE cmp yes no) 21919 for { 21920 v := b.Control 21921 if v.Op != OpAMD64SETBE { 21922 break 21923 } 21924 cmp := v.Args[0] 21925 yes := b.Succs[0] 21926 no := b.Succs[1] 21927 b.Kind = BlockAMD64ULE 21928 b.SetControl(cmp) 21929 _ = yes 21930 _ = no 21931 return true 21932 } 21933 // match: (If (SETA cmp) yes no) 21934 // cond: 21935 // result: (UGT cmp yes no) 21936 for { 21937 v := b.Control 21938 if v.Op != OpAMD64SETA { 21939 break 21940 } 21941 cmp := v.Args[0] 21942 yes := b.Succs[0] 21943 no := b.Succs[1] 21944 b.Kind = BlockAMD64UGT 21945 b.SetControl(cmp) 21946 _ = yes 21947 _ = no 21948 return true 21949 } 21950 // match: (If (SETAE cmp) yes no) 21951 // cond: 21952 // result: (UGE cmp yes no) 21953 for { 21954 v := b.Control 21955 if v.Op != OpAMD64SETAE { 21956 break 21957 } 21958 cmp := v.Args[0] 21959 yes := b.Succs[0] 21960 no := b.Succs[1] 21961 b.Kind = BlockAMD64UGE 21962 b.SetControl(cmp) 21963 _ = yes 21964 _ = no 21965 return true 21966 } 21967 // match: (If (SETGF cmp) yes no) 21968 // cond: 21969 // result: (UGT cmp yes no) 21970 for { 21971 v := b.Control 21972 if v.Op != OpAMD64SETGF { 21973 break 21974 } 21975 cmp := v.Args[0] 21976 yes := b.Succs[0] 21977 no := b.Succs[1] 21978 b.Kind = BlockAMD64UGT 21979 b.SetControl(cmp) 21980 _ = yes 21981 _ = no 21982 return true 21983 } 21984 // match: (If (SETGEF cmp) yes no) 21985 // cond: 21986 // result: (UGE cmp yes no) 21987 for { 21988 v := b.Control 21989 if v.Op != OpAMD64SETGEF { 21990 break 21991 } 21992 cmp := v.Args[0] 21993 yes := b.Succs[0] 21994 no := b.Succs[1] 21995 b.Kind = BlockAMD64UGE 21996 b.SetControl(cmp) 21997 _ = yes 21998 _ = no 21999 return true 22000 } 22001 // match: (If (SETEQF cmp) yes no) 22002 // cond: 22003 // result: (EQF cmp yes no) 22004 for { 22005 v := b.Control 22006 if v.Op != OpAMD64SETEQF { 22007 break 22008 } 22009 cmp := v.Args[0] 22010 yes := b.Succs[0] 22011 no := b.Succs[1] 22012 b.Kind = BlockAMD64EQF 22013 b.SetControl(cmp) 22014 _ = yes 22015 _ = no 22016 return true 22017 } 22018 // match: (If (SETNEF cmp) yes no) 22019 // cond: 22020 // result: (NEF cmp yes no) 22021 for { 22022 v := b.Control 22023 if v.Op != OpAMD64SETNEF { 22024 break 22025 } 22026 cmp := v.Args[0] 22027 yes := b.Succs[0] 22028 no := b.Succs[1] 22029 b.Kind = BlockAMD64NEF 22030 b.SetControl(cmp) 22031 _ = yes 22032 _ = no 22033 return true 22034 } 22035 // match: (If cond yes no) 22036 // cond: 22037 // result: (NE (TESTB cond cond) yes no) 22038 for { 22039 v := b.Control 22040 _ = v 22041 cond := b.Control 22042 yes := b.Succs[0] 22043 no := b.Succs[1] 22044 b.Kind = BlockAMD64NE 22045 v0 := b.NewValue0(v.Pos, OpAMD64TESTB, TypeFlags) 22046 v0.AddArg(cond) 22047 v0.AddArg(cond) 22048 b.SetControl(v0) 22049 _ = yes 22050 _ = no 22051 return true 22052 } 22053 case BlockAMD64LE: 22054 // match: (LE (InvertFlags cmp) yes no) 22055 // cond: 22056 // result: (GE cmp yes no) 22057 for { 22058 v := b.Control 22059 if v.Op != OpAMD64InvertFlags { 22060 break 22061 } 22062 cmp := v.Args[0] 22063 yes := b.Succs[0] 22064 no := b.Succs[1] 22065 b.Kind = BlockAMD64GE 22066 b.SetControl(cmp) 22067 _ = yes 22068 _ = no 22069 return true 22070 } 22071 // match: (LE (FlagEQ) yes no) 22072 // cond: 22073 // result: (First nil yes no) 22074 for { 22075 v := b.Control 22076 if v.Op != OpAMD64FlagEQ { 22077 break 22078 } 22079 yes := b.Succs[0] 22080 no := b.Succs[1] 22081 b.Kind = BlockFirst 22082 b.SetControl(nil) 22083 _ = yes 22084 _ = no 22085 return true 22086 } 22087 // match: (LE (FlagLT_ULT) yes no) 22088 // cond: 22089 // result: (First nil yes no) 22090 for { 22091 v := b.Control 22092 if v.Op != OpAMD64FlagLT_ULT { 22093 break 22094 } 22095 yes := b.Succs[0] 22096 no := b.Succs[1] 22097 b.Kind = BlockFirst 22098 b.SetControl(nil) 22099 _ = yes 22100 _ = no 22101 return true 22102 } 22103 // match: (LE (FlagLT_UGT) yes no) 22104 // cond: 22105 // result: (First nil yes no) 22106 for { 22107 v := b.Control 22108 if v.Op != OpAMD64FlagLT_UGT { 22109 break 22110 } 22111 yes := b.Succs[0] 22112 no := b.Succs[1] 22113 b.Kind = BlockFirst 22114 b.SetControl(nil) 22115 _ = yes 22116 _ = no 22117 return true 22118 } 22119 // match: (LE (FlagGT_ULT) yes no) 22120 // cond: 22121 // result: (First nil no yes) 22122 for { 22123 v := b.Control 22124 if v.Op != OpAMD64FlagGT_ULT { 22125 break 22126 } 22127 yes := b.Succs[0] 22128 no := b.Succs[1] 22129 b.Kind = BlockFirst 22130 b.SetControl(nil) 22131 b.swapSuccessors() 22132 _ = no 22133 _ = yes 22134 return true 22135 } 22136 // match: (LE (FlagGT_UGT) yes no) 22137 // cond: 22138 // result: (First nil no yes) 22139 for { 22140 v := b.Control 22141 if v.Op != OpAMD64FlagGT_UGT { 22142 break 22143 } 22144 yes := b.Succs[0] 22145 no := b.Succs[1] 22146 b.Kind = BlockFirst 22147 b.SetControl(nil) 22148 b.swapSuccessors() 22149 _ = no 22150 _ = yes 22151 return true 22152 } 22153 case BlockAMD64LT: 22154 // match: (LT (InvertFlags cmp) yes no) 22155 // cond: 22156 // result: (GT cmp yes no) 22157 for { 22158 v := b.Control 22159 if v.Op != OpAMD64InvertFlags { 22160 break 22161 } 22162 cmp := v.Args[0] 22163 yes := b.Succs[0] 22164 no := b.Succs[1] 22165 b.Kind = BlockAMD64GT 22166 b.SetControl(cmp) 22167 _ = yes 22168 _ = no 22169 return true 22170 } 22171 // match: (LT (FlagEQ) yes no) 22172 // cond: 22173 // result: (First nil no yes) 22174 for { 22175 v := b.Control 22176 if v.Op != OpAMD64FlagEQ { 22177 break 22178 } 22179 yes := b.Succs[0] 22180 no := b.Succs[1] 22181 b.Kind = BlockFirst 22182 b.SetControl(nil) 22183 b.swapSuccessors() 22184 _ = no 22185 _ = yes 22186 return true 22187 } 22188 // match: (LT (FlagLT_ULT) yes no) 22189 // cond: 22190 // result: (First nil yes no) 22191 for { 22192 v := b.Control 22193 if v.Op != OpAMD64FlagLT_ULT { 22194 break 22195 } 22196 yes := b.Succs[0] 22197 no := b.Succs[1] 22198 b.Kind = BlockFirst 22199 b.SetControl(nil) 22200 _ = yes 22201 _ = no 22202 return true 22203 } 22204 // match: (LT (FlagLT_UGT) yes no) 22205 // cond: 22206 // result: (First nil yes no) 22207 for { 22208 v := b.Control 22209 if v.Op != OpAMD64FlagLT_UGT { 22210 break 22211 } 22212 yes := b.Succs[0] 22213 no := b.Succs[1] 22214 b.Kind = BlockFirst 22215 b.SetControl(nil) 22216 _ = yes 22217 _ = no 22218 return true 22219 } 22220 // match: (LT (FlagGT_ULT) yes no) 22221 // cond: 22222 // result: (First nil no yes) 22223 for { 22224 v := b.Control 22225 if v.Op != OpAMD64FlagGT_ULT { 22226 break 22227 } 22228 yes := b.Succs[0] 22229 no := b.Succs[1] 22230 b.Kind = BlockFirst 22231 b.SetControl(nil) 22232 b.swapSuccessors() 22233 _ = no 22234 _ = yes 22235 return true 22236 } 22237 // match: (LT (FlagGT_UGT) yes no) 22238 // cond: 22239 // result: (First nil no yes) 22240 for { 22241 v := b.Control 22242 if v.Op != OpAMD64FlagGT_UGT { 22243 break 22244 } 22245 yes := b.Succs[0] 22246 no := b.Succs[1] 22247 b.Kind = BlockFirst 22248 b.SetControl(nil) 22249 b.swapSuccessors() 22250 _ = no 22251 _ = yes 22252 return true 22253 } 22254 case BlockAMD64NE: 22255 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 22256 // cond: 22257 // result: (LT cmp yes no) 22258 for { 22259 v := b.Control 22260 if v.Op != OpAMD64TESTB { 22261 break 22262 } 22263 v_0 := v.Args[0] 22264 if v_0.Op != OpAMD64SETL { 22265 break 22266 } 22267 cmp := v_0.Args[0] 22268 v_1 := v.Args[1] 22269 if v_1.Op != OpAMD64SETL { 22270 break 22271 } 22272 if cmp != v_1.Args[0] { 22273 break 22274 } 22275 yes := b.Succs[0] 22276 no := b.Succs[1] 22277 b.Kind = BlockAMD64LT 22278 b.SetControl(cmp) 22279 _ = yes 22280 _ = no 22281 return true 22282 } 22283 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 22284 // cond: 22285 // result: (LE cmp yes no) 22286 for { 22287 v := b.Control 22288 if v.Op != OpAMD64TESTB { 22289 break 22290 } 22291 v_0 := v.Args[0] 22292 if v_0.Op != OpAMD64SETLE { 22293 break 22294 } 22295 cmp := v_0.Args[0] 22296 v_1 := v.Args[1] 22297 if v_1.Op != OpAMD64SETLE { 22298 break 22299 } 22300 if cmp != v_1.Args[0] { 22301 break 22302 } 22303 yes := b.Succs[0] 22304 no := b.Succs[1] 22305 b.Kind = BlockAMD64LE 22306 b.SetControl(cmp) 22307 _ = yes 22308 _ = no 22309 return true 22310 } 22311 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 22312 // cond: 22313 // result: (GT cmp yes no) 22314 for { 22315 v := b.Control 22316 if v.Op != OpAMD64TESTB { 22317 break 22318 } 22319 v_0 := v.Args[0] 22320 if v_0.Op != OpAMD64SETG { 22321 break 22322 } 22323 cmp := v_0.Args[0] 22324 v_1 := v.Args[1] 22325 if v_1.Op != OpAMD64SETG { 22326 break 22327 } 22328 if cmp != v_1.Args[0] { 22329 break 22330 } 22331 yes := b.Succs[0] 22332 no := b.Succs[1] 22333 b.Kind = BlockAMD64GT 22334 b.SetControl(cmp) 22335 _ = yes 22336 _ = no 22337 return true 22338 } 22339 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 22340 // cond: 22341 // result: (GE cmp yes no) 22342 for { 22343 v := b.Control 22344 if v.Op != OpAMD64TESTB { 22345 break 22346 } 22347 v_0 := v.Args[0] 22348 if v_0.Op != OpAMD64SETGE { 22349 break 22350 } 22351 cmp := v_0.Args[0] 22352 v_1 := v.Args[1] 22353 if v_1.Op != OpAMD64SETGE { 22354 break 22355 } 22356 if cmp != v_1.Args[0] { 22357 break 22358 } 22359 yes := b.Succs[0] 22360 no := b.Succs[1] 22361 b.Kind = BlockAMD64GE 22362 b.SetControl(cmp) 22363 _ = yes 22364 _ = no 22365 return true 22366 } 22367 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 22368 // cond: 22369 // result: (EQ cmp yes no) 22370 for { 22371 v := b.Control 22372 if v.Op != OpAMD64TESTB { 22373 break 22374 } 22375 v_0 := v.Args[0] 22376 if v_0.Op != OpAMD64SETEQ { 22377 break 22378 } 22379 cmp := v_0.Args[0] 22380 v_1 := v.Args[1] 22381 if v_1.Op != OpAMD64SETEQ { 22382 break 22383 } 22384 if cmp != v_1.Args[0] { 22385 break 22386 } 22387 yes := b.Succs[0] 22388 no := b.Succs[1] 22389 b.Kind = BlockAMD64EQ 22390 b.SetControl(cmp) 22391 _ = yes 22392 _ = no 22393 return true 22394 } 22395 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 22396 // cond: 22397 // result: (NE cmp yes no) 22398 for { 22399 v := b.Control 22400 if v.Op != OpAMD64TESTB { 22401 break 22402 } 22403 v_0 := v.Args[0] 22404 if v_0.Op != OpAMD64SETNE { 22405 break 22406 } 22407 cmp := v_0.Args[0] 22408 v_1 := v.Args[1] 22409 if v_1.Op != OpAMD64SETNE { 22410 break 22411 } 22412 if cmp != v_1.Args[0] { 22413 break 22414 } 22415 yes := b.Succs[0] 22416 no := b.Succs[1] 22417 b.Kind = BlockAMD64NE 22418 b.SetControl(cmp) 22419 _ = yes 22420 _ = no 22421 return true 22422 } 22423 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 22424 // cond: 22425 // result: (ULT cmp yes no) 22426 for { 22427 v := b.Control 22428 if v.Op != OpAMD64TESTB { 22429 break 22430 } 22431 v_0 := v.Args[0] 22432 if v_0.Op != OpAMD64SETB { 22433 break 22434 } 22435 cmp := v_0.Args[0] 22436 v_1 := v.Args[1] 22437 if v_1.Op != OpAMD64SETB { 22438 break 22439 } 22440 if cmp != v_1.Args[0] { 22441 break 22442 } 22443 yes := b.Succs[0] 22444 no := b.Succs[1] 22445 b.Kind = BlockAMD64ULT 22446 b.SetControl(cmp) 22447 _ = yes 22448 _ = no 22449 return true 22450 } 22451 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 22452 // cond: 22453 // result: (ULE cmp yes no) 22454 for { 22455 v := b.Control 22456 if v.Op != OpAMD64TESTB { 22457 break 22458 } 22459 v_0 := v.Args[0] 22460 if v_0.Op != OpAMD64SETBE { 22461 break 22462 } 22463 cmp := v_0.Args[0] 22464 v_1 := v.Args[1] 22465 if v_1.Op != OpAMD64SETBE { 22466 break 22467 } 22468 if cmp != v_1.Args[0] { 22469 break 22470 } 22471 yes := b.Succs[0] 22472 no := b.Succs[1] 22473 b.Kind = BlockAMD64ULE 22474 b.SetControl(cmp) 22475 _ = yes 22476 _ = no 22477 return true 22478 } 22479 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 22480 // cond: 22481 // result: (UGT cmp yes no) 22482 for { 22483 v := b.Control 22484 if v.Op != OpAMD64TESTB { 22485 break 22486 } 22487 v_0 := v.Args[0] 22488 if v_0.Op != OpAMD64SETA { 22489 break 22490 } 22491 cmp := v_0.Args[0] 22492 v_1 := v.Args[1] 22493 if v_1.Op != OpAMD64SETA { 22494 break 22495 } 22496 if cmp != v_1.Args[0] { 22497 break 22498 } 22499 yes := b.Succs[0] 22500 no := b.Succs[1] 22501 b.Kind = BlockAMD64UGT 22502 b.SetControl(cmp) 22503 _ = yes 22504 _ = no 22505 return true 22506 } 22507 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 22508 // cond: 22509 // result: (UGE cmp yes no) 22510 for { 22511 v := b.Control 22512 if v.Op != OpAMD64TESTB { 22513 break 22514 } 22515 v_0 := v.Args[0] 22516 if v_0.Op != OpAMD64SETAE { 22517 break 22518 } 22519 cmp := v_0.Args[0] 22520 v_1 := v.Args[1] 22521 if v_1.Op != OpAMD64SETAE { 22522 break 22523 } 22524 if cmp != v_1.Args[0] { 22525 break 22526 } 22527 yes := b.Succs[0] 22528 no := b.Succs[1] 22529 b.Kind = BlockAMD64UGE 22530 b.SetControl(cmp) 22531 _ = yes 22532 _ = no 22533 return true 22534 } 22535 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 22536 // cond: 22537 // result: (UGT cmp yes no) 22538 for { 22539 v := b.Control 22540 if v.Op != OpAMD64TESTB { 22541 break 22542 } 22543 v_0 := v.Args[0] 22544 if v_0.Op != OpAMD64SETGF { 22545 break 22546 } 22547 cmp := v_0.Args[0] 22548 v_1 := v.Args[1] 22549 if v_1.Op != OpAMD64SETGF { 22550 break 22551 } 22552 if cmp != v_1.Args[0] { 22553 break 22554 } 22555 yes := b.Succs[0] 22556 no := b.Succs[1] 22557 b.Kind = BlockAMD64UGT 22558 b.SetControl(cmp) 22559 _ = yes 22560 _ = no 22561 return true 22562 } 22563 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 22564 // cond: 22565 // result: (UGE cmp yes no) 22566 for { 22567 v := b.Control 22568 if v.Op != OpAMD64TESTB { 22569 break 22570 } 22571 v_0 := v.Args[0] 22572 if v_0.Op != OpAMD64SETGEF { 22573 break 22574 } 22575 cmp := v_0.Args[0] 22576 v_1 := v.Args[1] 22577 if v_1.Op != OpAMD64SETGEF { 22578 break 22579 } 22580 if cmp != v_1.Args[0] { 22581 break 22582 } 22583 yes := b.Succs[0] 22584 no := b.Succs[1] 22585 b.Kind = BlockAMD64UGE 22586 b.SetControl(cmp) 22587 _ = yes 22588 _ = no 22589 return true 22590 } 22591 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 22592 // cond: 22593 // result: (EQF cmp yes no) 22594 for { 22595 v := b.Control 22596 if v.Op != OpAMD64TESTB { 22597 break 22598 } 22599 v_0 := v.Args[0] 22600 if v_0.Op != OpAMD64SETEQF { 22601 break 22602 } 22603 cmp := v_0.Args[0] 22604 v_1 := v.Args[1] 22605 if v_1.Op != OpAMD64SETEQF { 22606 break 22607 } 22608 if cmp != v_1.Args[0] { 22609 break 22610 } 22611 yes := b.Succs[0] 22612 no := b.Succs[1] 22613 b.Kind = BlockAMD64EQF 22614 b.SetControl(cmp) 22615 _ = yes 22616 _ = no 22617 return true 22618 } 22619 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 22620 // cond: 22621 // result: (NEF cmp yes no) 22622 for { 22623 v := b.Control 22624 if v.Op != OpAMD64TESTB { 22625 break 22626 } 22627 v_0 := v.Args[0] 22628 if v_0.Op != OpAMD64SETNEF { 22629 break 22630 } 22631 cmp := v_0.Args[0] 22632 v_1 := v.Args[1] 22633 if v_1.Op != OpAMD64SETNEF { 22634 break 22635 } 22636 if cmp != v_1.Args[0] { 22637 break 22638 } 22639 yes := b.Succs[0] 22640 no := b.Succs[1] 22641 b.Kind = BlockAMD64NEF 22642 b.SetControl(cmp) 22643 _ = yes 22644 _ = no 22645 return true 22646 } 22647 // match: (NE (InvertFlags cmp) yes no) 22648 // cond: 22649 // result: (NE cmp yes no) 22650 for { 22651 v := b.Control 22652 if v.Op != OpAMD64InvertFlags { 22653 break 22654 } 22655 cmp := v.Args[0] 22656 yes := b.Succs[0] 22657 no := b.Succs[1] 22658 b.Kind = BlockAMD64NE 22659 b.SetControl(cmp) 22660 _ = yes 22661 _ = no 22662 return true 22663 } 22664 // match: (NE (FlagEQ) yes no) 22665 // cond: 22666 // result: (First nil no yes) 22667 for { 22668 v := b.Control 22669 if v.Op != OpAMD64FlagEQ { 22670 break 22671 } 22672 yes := b.Succs[0] 22673 no := b.Succs[1] 22674 b.Kind = BlockFirst 22675 b.SetControl(nil) 22676 b.swapSuccessors() 22677 _ = no 22678 _ = yes 22679 return true 22680 } 22681 // match: (NE (FlagLT_ULT) yes no) 22682 // cond: 22683 // result: (First nil yes no) 22684 for { 22685 v := b.Control 22686 if v.Op != OpAMD64FlagLT_ULT { 22687 break 22688 } 22689 yes := b.Succs[0] 22690 no := b.Succs[1] 22691 b.Kind = BlockFirst 22692 b.SetControl(nil) 22693 _ = yes 22694 _ = no 22695 return true 22696 } 22697 // match: (NE (FlagLT_UGT) yes no) 22698 // cond: 22699 // result: (First nil yes no) 22700 for { 22701 v := b.Control 22702 if v.Op != OpAMD64FlagLT_UGT { 22703 break 22704 } 22705 yes := b.Succs[0] 22706 no := b.Succs[1] 22707 b.Kind = BlockFirst 22708 b.SetControl(nil) 22709 _ = yes 22710 _ = no 22711 return true 22712 } 22713 // match: (NE (FlagGT_ULT) yes no) 22714 // cond: 22715 // result: (First nil yes no) 22716 for { 22717 v := b.Control 22718 if v.Op != OpAMD64FlagGT_ULT { 22719 break 22720 } 22721 yes := b.Succs[0] 22722 no := b.Succs[1] 22723 b.Kind = BlockFirst 22724 b.SetControl(nil) 22725 _ = yes 22726 _ = no 22727 return true 22728 } 22729 // match: (NE (FlagGT_UGT) yes no) 22730 // cond: 22731 // result: (First nil yes no) 22732 for { 22733 v := b.Control 22734 if v.Op != OpAMD64FlagGT_UGT { 22735 break 22736 } 22737 yes := b.Succs[0] 22738 no := b.Succs[1] 22739 b.Kind = BlockFirst 22740 b.SetControl(nil) 22741 _ = yes 22742 _ = no 22743 return true 22744 } 22745 case BlockAMD64UGE: 22746 // match: (UGE (InvertFlags cmp) yes no) 22747 // cond: 22748 // result: (ULE cmp yes no) 22749 for { 22750 v := b.Control 22751 if v.Op != OpAMD64InvertFlags { 22752 break 22753 } 22754 cmp := v.Args[0] 22755 yes := b.Succs[0] 22756 no := b.Succs[1] 22757 b.Kind = BlockAMD64ULE 22758 b.SetControl(cmp) 22759 _ = yes 22760 _ = no 22761 return true 22762 } 22763 // match: (UGE (FlagEQ) yes no) 22764 // cond: 22765 // result: (First nil yes no) 22766 for { 22767 v := b.Control 22768 if v.Op != OpAMD64FlagEQ { 22769 break 22770 } 22771 yes := b.Succs[0] 22772 no := b.Succs[1] 22773 b.Kind = BlockFirst 22774 b.SetControl(nil) 22775 _ = yes 22776 _ = no 22777 return true 22778 } 22779 // match: (UGE (FlagLT_ULT) yes no) 22780 // cond: 22781 // result: (First nil no yes) 22782 for { 22783 v := b.Control 22784 if v.Op != OpAMD64FlagLT_ULT { 22785 break 22786 } 22787 yes := b.Succs[0] 22788 no := b.Succs[1] 22789 b.Kind = BlockFirst 22790 b.SetControl(nil) 22791 b.swapSuccessors() 22792 _ = no 22793 _ = yes 22794 return true 22795 } 22796 // match: (UGE (FlagLT_UGT) yes no) 22797 // cond: 22798 // result: (First nil yes no) 22799 for { 22800 v := b.Control 22801 if v.Op != OpAMD64FlagLT_UGT { 22802 break 22803 } 22804 yes := b.Succs[0] 22805 no := b.Succs[1] 22806 b.Kind = BlockFirst 22807 b.SetControl(nil) 22808 _ = yes 22809 _ = no 22810 return true 22811 } 22812 // match: (UGE (FlagGT_ULT) yes no) 22813 // cond: 22814 // result: (First nil no yes) 22815 for { 22816 v := b.Control 22817 if v.Op != OpAMD64FlagGT_ULT { 22818 break 22819 } 22820 yes := b.Succs[0] 22821 no := b.Succs[1] 22822 b.Kind = BlockFirst 22823 b.SetControl(nil) 22824 b.swapSuccessors() 22825 _ = no 22826 _ = yes 22827 return true 22828 } 22829 // match: (UGE (FlagGT_UGT) yes no) 22830 // cond: 22831 // result: (First nil yes no) 22832 for { 22833 v := b.Control 22834 if v.Op != OpAMD64FlagGT_UGT { 22835 break 22836 } 22837 yes := b.Succs[0] 22838 no := b.Succs[1] 22839 b.Kind = BlockFirst 22840 b.SetControl(nil) 22841 _ = yes 22842 _ = no 22843 return true 22844 } 22845 case BlockAMD64UGT: 22846 // match: (UGT (InvertFlags cmp) yes no) 22847 // cond: 22848 // result: (ULT cmp yes no) 22849 for { 22850 v := b.Control 22851 if v.Op != OpAMD64InvertFlags { 22852 break 22853 } 22854 cmp := v.Args[0] 22855 yes := b.Succs[0] 22856 no := b.Succs[1] 22857 b.Kind = BlockAMD64ULT 22858 b.SetControl(cmp) 22859 _ = yes 22860 _ = no 22861 return true 22862 } 22863 // match: (UGT (FlagEQ) yes no) 22864 // cond: 22865 // result: (First nil no yes) 22866 for { 22867 v := b.Control 22868 if v.Op != OpAMD64FlagEQ { 22869 break 22870 } 22871 yes := b.Succs[0] 22872 no := b.Succs[1] 22873 b.Kind = BlockFirst 22874 b.SetControl(nil) 22875 b.swapSuccessors() 22876 _ = no 22877 _ = yes 22878 return true 22879 } 22880 // match: (UGT (FlagLT_ULT) yes no) 22881 // cond: 22882 // result: (First nil no yes) 22883 for { 22884 v := b.Control 22885 if v.Op != OpAMD64FlagLT_ULT { 22886 break 22887 } 22888 yes := b.Succs[0] 22889 no := b.Succs[1] 22890 b.Kind = BlockFirst 22891 b.SetControl(nil) 22892 b.swapSuccessors() 22893 _ = no 22894 _ = yes 22895 return true 22896 } 22897 // match: (UGT (FlagLT_UGT) yes no) 22898 // cond: 22899 // result: (First nil yes no) 22900 for { 22901 v := b.Control 22902 if v.Op != OpAMD64FlagLT_UGT { 22903 break 22904 } 22905 yes := b.Succs[0] 22906 no := b.Succs[1] 22907 b.Kind = BlockFirst 22908 b.SetControl(nil) 22909 _ = yes 22910 _ = no 22911 return true 22912 } 22913 // match: (UGT (FlagGT_ULT) yes no) 22914 // cond: 22915 // result: (First nil no yes) 22916 for { 22917 v := b.Control 22918 if v.Op != OpAMD64FlagGT_ULT { 22919 break 22920 } 22921 yes := b.Succs[0] 22922 no := b.Succs[1] 22923 b.Kind = BlockFirst 22924 b.SetControl(nil) 22925 b.swapSuccessors() 22926 _ = no 22927 _ = yes 22928 return true 22929 } 22930 // match: (UGT (FlagGT_UGT) yes no) 22931 // cond: 22932 // result: (First nil yes no) 22933 for { 22934 v := b.Control 22935 if v.Op != OpAMD64FlagGT_UGT { 22936 break 22937 } 22938 yes := b.Succs[0] 22939 no := b.Succs[1] 22940 b.Kind = BlockFirst 22941 b.SetControl(nil) 22942 _ = yes 22943 _ = no 22944 return true 22945 } 22946 case BlockAMD64ULE: 22947 // match: (ULE (InvertFlags cmp) yes no) 22948 // cond: 22949 // result: (UGE cmp yes no) 22950 for { 22951 v := b.Control 22952 if v.Op != OpAMD64InvertFlags { 22953 break 22954 } 22955 cmp := v.Args[0] 22956 yes := b.Succs[0] 22957 no := b.Succs[1] 22958 b.Kind = BlockAMD64UGE 22959 b.SetControl(cmp) 22960 _ = yes 22961 _ = no 22962 return true 22963 } 22964 // match: (ULE (FlagEQ) yes no) 22965 // cond: 22966 // result: (First nil yes no) 22967 for { 22968 v := b.Control 22969 if v.Op != OpAMD64FlagEQ { 22970 break 22971 } 22972 yes := b.Succs[0] 22973 no := b.Succs[1] 22974 b.Kind = BlockFirst 22975 b.SetControl(nil) 22976 _ = yes 22977 _ = no 22978 return true 22979 } 22980 // match: (ULE (FlagLT_ULT) yes no) 22981 // cond: 22982 // result: (First nil yes no) 22983 for { 22984 v := b.Control 22985 if v.Op != OpAMD64FlagLT_ULT { 22986 break 22987 } 22988 yes := b.Succs[0] 22989 no := b.Succs[1] 22990 b.Kind = BlockFirst 22991 b.SetControl(nil) 22992 _ = yes 22993 _ = no 22994 return true 22995 } 22996 // match: (ULE (FlagLT_UGT) yes no) 22997 // cond: 22998 // result: (First nil no yes) 22999 for { 23000 v := b.Control 23001 if v.Op != OpAMD64FlagLT_UGT { 23002 break 23003 } 23004 yes := b.Succs[0] 23005 no := b.Succs[1] 23006 b.Kind = BlockFirst 23007 b.SetControl(nil) 23008 b.swapSuccessors() 23009 _ = no 23010 _ = yes 23011 return true 23012 } 23013 // match: (ULE (FlagGT_ULT) yes no) 23014 // cond: 23015 // result: (First nil yes no) 23016 for { 23017 v := b.Control 23018 if v.Op != OpAMD64FlagGT_ULT { 23019 break 23020 } 23021 yes := b.Succs[0] 23022 no := b.Succs[1] 23023 b.Kind = BlockFirst 23024 b.SetControl(nil) 23025 _ = yes 23026 _ = no 23027 return true 23028 } 23029 // match: (ULE (FlagGT_UGT) yes no) 23030 // cond: 23031 // result: (First nil no yes) 23032 for { 23033 v := b.Control 23034 if v.Op != OpAMD64FlagGT_UGT { 23035 break 23036 } 23037 yes := b.Succs[0] 23038 no := b.Succs[1] 23039 b.Kind = BlockFirst 23040 b.SetControl(nil) 23041 b.swapSuccessors() 23042 _ = no 23043 _ = yes 23044 return true 23045 } 23046 case BlockAMD64ULT: 23047 // match: (ULT (InvertFlags cmp) yes no) 23048 // cond: 23049 // result: (UGT cmp yes no) 23050 for { 23051 v := b.Control 23052 if v.Op != OpAMD64InvertFlags { 23053 break 23054 } 23055 cmp := v.Args[0] 23056 yes := b.Succs[0] 23057 no := b.Succs[1] 23058 b.Kind = BlockAMD64UGT 23059 b.SetControl(cmp) 23060 _ = yes 23061 _ = no 23062 return true 23063 } 23064 // match: (ULT (FlagEQ) yes no) 23065 // cond: 23066 // result: (First nil no yes) 23067 for { 23068 v := b.Control 23069 if v.Op != OpAMD64FlagEQ { 23070 break 23071 } 23072 yes := b.Succs[0] 23073 no := b.Succs[1] 23074 b.Kind = BlockFirst 23075 b.SetControl(nil) 23076 b.swapSuccessors() 23077 _ = no 23078 _ = yes 23079 return true 23080 } 23081 // match: (ULT (FlagLT_ULT) yes no) 23082 // cond: 23083 // result: (First nil yes no) 23084 for { 23085 v := b.Control 23086 if v.Op != OpAMD64FlagLT_ULT { 23087 break 23088 } 23089 yes := b.Succs[0] 23090 no := b.Succs[1] 23091 b.Kind = BlockFirst 23092 b.SetControl(nil) 23093 _ = yes 23094 _ = no 23095 return true 23096 } 23097 // match: (ULT (FlagLT_UGT) yes no) 23098 // cond: 23099 // result: (First nil no yes) 23100 for { 23101 v := b.Control 23102 if v.Op != OpAMD64FlagLT_UGT { 23103 break 23104 } 23105 yes := b.Succs[0] 23106 no := b.Succs[1] 23107 b.Kind = BlockFirst 23108 b.SetControl(nil) 23109 b.swapSuccessors() 23110 _ = no 23111 _ = yes 23112 return true 23113 } 23114 // match: (ULT (FlagGT_ULT) yes no) 23115 // cond: 23116 // result: (First nil yes no) 23117 for { 23118 v := b.Control 23119 if v.Op != OpAMD64FlagGT_ULT { 23120 break 23121 } 23122 yes := b.Succs[0] 23123 no := b.Succs[1] 23124 b.Kind = BlockFirst 23125 b.SetControl(nil) 23126 _ = yes 23127 _ = no 23128 return true 23129 } 23130 // match: (ULT (FlagGT_UGT) yes no) 23131 // cond: 23132 // result: (First nil no yes) 23133 for { 23134 v := b.Control 23135 if v.Op != OpAMD64FlagGT_UGT { 23136 break 23137 } 23138 yes := b.Succs[0] 23139 no := b.Succs[1] 23140 b.Kind = BlockFirst 23141 b.SetControl(nil) 23142 b.swapSuccessors() 23143 _ = no 23144 _ = yes 23145 return true 23146 } 23147 } 23148 return false 23149 }