// github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from _gen/AMD64.rules; DO NOT EDIT.
// generated with: cd _gen; go run .

package ssa

import "github.com/bir3/gocompiler/src/internal/buildcfg"
import "math"
import "github.com/bir3/gocompiler/src/cmd/internal/obj"
import "github.com/bir3/gocompiler/src/cmd/compile/internal/types"
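// rewriteValueAMD64 dispatches on v.Op to the rewrite rules generated
// from _gen/AMD64.rules. Generic ops are either lowered in place by
// assigning the AMD64 op directly (v.Op = ...) or handed to a per-op
// helper function; it reports whether v changed, and the rewrite pass
// reapplies it until no rule fires.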
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDNL:
		return rewriteValueAMD64_OpAMD64ANDNL(v)
	case OpAMD64ANDNQ:
		return rewriteValueAMD64_OpAMD64ANDNQ(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BSWAPL:
		return rewriteValueAMD64_OpAMD64BSWAPL(v)
	case OpAMD64BSWAPQ:
		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBELstore:
		return rewriteValueAMD64_OpAMD64MOVBELstore(v)
	case OpAMD64MOVBEQstore:
		return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
	case OpAMD64MOVBEWstore:
		return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVOstoreconst:
		return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SARXLload:
		return rewriteValueAMD64_OpAMD64SARXLload(v)
	case OpAMD64SARXQload:
		return rewriteValueAMD64_OpAMD64SARXQload(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHLXLload:
		return rewriteValueAMD64_OpAMD64SHLXLload(v)
	case OpAMD64SHLXQload:
		return rewriteValueAMD64_OpAMD64SHLXQload(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SHRXLload:
		return rewriteValueAMD64_OpAMD64SHRXLload(v)
	case OpAMD64SHRXQload:
		return rewriteValueAMD64_OpAMD64SHRXQload(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero(v)
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpPrefetchCache:
		v.Op = OpAMD64PrefetchT0
		return true
	case OpPrefetchCacheStreamed:
		v.Op = OpAMD64PrefetchNTA
		return true
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSelectN:
		return rewriteValueAMD64_OpSelectN(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpSqrt32:
		v.Op = OpAMD64SQRTSS
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTailCall:
		v.Op = OpAMD64CALLtail
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
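// exampleRewriteToFixedPoint is an illustrative sketch, not part of the
// generated code: it shows how a rewrite pass can drive rewriteValueAMD64
// to a fixed point by sweeping every value until no rule fires. The actual
// driver in this package (applyRewrite in rewrite.go) additionally rewrites
// block control values and cleans up dead values.
func exampleRewriteToFixedPoint(f *Func) {
	for changed := true; changed; {
		changed = false
		for _, b := range f.Blocks {
			for _, v := range b.Values {
				// rewriteValueAMD64 reports true when it replaced v with a
				// cheaper equivalent, so another sweep may enable more rules.
				if rewriteValueAMD64(v) {
					changed = true
				}
			}
		}
	}
}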
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
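// The _i0 loops in the functions below try both operand orders of
// commutative ops: each iteration swaps v_0 and v_1, so a pattern like
// (ADDL x (MOVLconst [c])) also matches (ADDL (MOVLconst [c]) x). A
// failed match falls through, via break, to the next rule.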
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
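// Many of the ADDL rules above strength-reduce an add+shift pair into a
// single LEA: (ADDL x (SHLLconst [3] y)) becomes (LEAL8 x y), computing
// x + 8*y in one instruction, and (ADDL x (ADDL y y)) likewise becomes
// (LEAL2 x y). The ADDLconst rules that follow fold constants into the
// LEA displacement the same way.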
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
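// The load/modify rewrites that follow all fold addressing arithmetic into
// the instruction's offset: an (ADDQconst [off2] base) pointer adds off2 to
// the existing offset when the sum still fits in 32 bits, and a
// (LEAQ [off2] {sym2} base) pointer additionally merges its symbol via
// canMergeSym/mergeSym.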
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
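// rewriteValueAMD64_OpAMD64ADDQ mirrors the 32-bit ADDL rules at 64 bits:
// small constants fold into ADDQconst, add+shift pairs become LEAQ2/4/8,
// (ADDQ x (NEGQ y)) becomes (SUBQ x y), and a memory operand is folded into
// ADDQload when canMergeLoadClobber proves the load is used only here and
// may be clobbered.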
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
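// ADDQconst folds pure constant arithmetic at compile time:
// (ADDQconst [c] (MOVQconst [d])) becomes (MOVQconst [int64(c)+d]), an add
// of 0 disappears entirely, and chained constants combine into one
// ADDQconst as long as the running sum stays within 32 bits (the is32Bit
// condition guards the fold).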
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) + d)
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (ADDQconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
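// Note on the ADDQconst rules above: x86-64 addressing modes compute
// base+index*scale+disp in one LEAQ, so an add-of-constant folds into any
// LEAQ it feeds. Illustratively (exact codegen depends on context), for
// p + 8*i + 16 the value (ADDQconst [16] (LEAQ8 [0] p i)) becomes
// (LEAQ8 [16] p i), a single LEAQ 16(p)(i*8) instead of LEAQ plus ADDQ.
// The is32Bit conditions keep the folded displacement inside the signed
// 32-bit field the encoding allows.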
func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
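// Note on ADDQload above: the first two rules fold an ADDQconst or LEAQ
// base into the load's offset/symbol, and the last rule is store-to-load
// forwarding: if the quad being loaded was just stored from an XMM
// register (a MOVSDstore to the same ptr/off/sym), the memory round trip
// is replaced by a direct register-to-register move (MOVQf2i).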
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTL {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRLconst [int8(log32(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTRLconst)
			v.AuxInt = int8ToAuxInt(int8(log32(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ANDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDL x (NOTL y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTL {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (NEGL x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIL)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSRL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSRL)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
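// Note on the ANDL rules above: the buildcfg.GOAMD64 >= 3 conditions gate
// BMI1 instructions, which are guaranteed only at the x86-64-v3 level.
// x & ^y selects ANDNL, x & -x (isolate lowest set bit) selects BLSIL, and
// x & (x-1) (clear lowest set bit) selects BLSRL. Independently, ANDing
// with the complement of a single shifted bit, x &^ (1<<y), becomes the
// bit-test-and-reset instruction BTRL.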
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDLconst [c] x)
	// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRLconst [int8(log32(^c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(^c)))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (BTRLconst [d] x))
	// result: (ANDLconst [c &^ (1<<uint32(d))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: c==0
	// result: (MOVLconst [0])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: c==-1
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == -1) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
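// Note on ANDLconst above: masking with 0xFF or 0xFFFF is re-expressed as
// an explicit zero-extension (MOVBQZX/MOVWQZX). A single-bit clear mask
// becomes BTRLconst only when uint64(^c) >= 128, presumably because below
// that the mask still fits a sign-extended 8-bit AND immediate and the
// plain ANDLconst encodes at least as compactly.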
func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNL x (SHLL (MOVLconst [1]) y))
	// result: (BTRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
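// Note on ANDNL/ANDNQ above: even after AND-NOT selection, a mask that is
// literally 1<<y, i.e. (SHLL (MOVLconst [1]) y), collapses further:
// clearing bit y of x is a single bit-test-and-reset, BTRL/BTRQ x y.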
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTQ {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRQconst [int8(log64(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTRQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ANDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDQ x (NOTQ y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTQ {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ x (NEGQ x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIQ)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSRQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSRQ)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
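// Note on ANDQ above: a 64-bit constant mask is normalized to ANDQconst
// only when it fits in a sign-extended 32-bit immediate (is32Bit). The
// BTRQconst form covers single-bit clears that such an immediate cannot
// express (in particular bits 32..63), and the uint64(^c) >= 128 floor
// presumably leaves small masks to the compact 8-bit AND encoding.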
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDQconst [c] x)
	// cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRQconst [int8(log32(^c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(^c)))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (BTRQconst [d] x))
	// cond: is32Bit(int64(c) &^ (1<<uint32(d)))
	// result: (ANDQconst [c &^ (1<<uint32(d))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPL (BSWAPL p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPL {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPL x:(MOVLload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBELload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPL x:(MOVBELload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVLload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBELload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPQ (BSWAPQ p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPQ {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBEQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPQ x:(MOVBEQload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBEQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
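// Note on BSWAPL/BSWAPQ above: two byte swaps cancel outright. With
// buildcfg.GOAMD64 >= 3, swapping a value that comes straight from a
// single-use load is folded into a MOVBE load (MOVBELload/MOVBEQload),
// which loads and byte-reverses in one instruction; the companion rules
// rewrite a swap of a MOVBE load back into a plain load, so a swap is
// never stacked on top of one.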
func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCLconst [c] (XORLconst [d] x))
	// result: (XORLconst [d ^ 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (BTCLconst [d] x))
	// result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCQconst [c] (XORQconst [d] x))
	// cond: is32Bit(int64(d) ^ 1<<uint32(c))
	// result: (XORQconst [d ^ 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (BTCQconst [d] x))
	// cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
	// result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d)<32
	// result: (BTLconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [0] s:(SHRXL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRXL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTQconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
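// Note on BTLconst/BTQconst above: testing bit c of x>>d is testing bit
// c+d of x, and testing bit c of x<<d is testing bit c-d of x, so constant
// shifts fold into the bit index while it stays in range (< 32 or < 64).
// Testing bit 0 of a variable right shift is the register form of the
// instruction: (BTLconst [0] (SHRL x y)) is exactly (BTL y x).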
func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRLconst [c] (BTSLconst [c] x))
	// result: (BTRLconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (BTCLconst [c] x))
	// result: (BTRLconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [d &^ (1<<uint32(c))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (BTRLconst [d] x))
	// result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (ANDQconst [d] x))
	// cond: is32Bit(int64(d) &^ (1<<uint32(c)))
	// result: (ANDQconst [d &^ (1<<uint32(c))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTRQconst [d] x))
	// cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
	// result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSLconst [c] (BTRLconst [c] x))
	// result: (BTSLconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (BTCLconst [c] x))
	// result: (BTSLconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (ORLconst [d] x))
	// result: (ORLconst [d | 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (BTSLconst [d] x))
	// result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (ORQconst [d] x))
	// cond: is32Bit(int64(d) | 1<<uint32(c))
	// result: (ORQconst [d | 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) | 1<<uint32(c))) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTSQconst [d] x))
	// cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
	// result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
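// Note on the BTS/BTR constant rules above: setting or clearing bit c
// directly after the opposite operation on the same bit keeps only the
// later operation, and two constant bit ops merge into one OR/AND mask,
// e.g. BTS c over BTS d becomes ORconst [1<<c | 1<<d]. The Q-sized
// variants carry is32Bit guards because ORQconst/ANDQconst immediates are
// limited to sign-extended 32 bits.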
func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
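// Note on the CMOVL* rules above: InvertFlags records that a comparison's
// operands were swapped, so each conditional move is rewritten to the
// condition with the comparison reversed (CC and LS swap, CS and HI swap,
// and so on; EQ maps to itself). When the flags are a compile-time
// constant (FlagEQ, FlagGT_UGT, ...), the CMOV disappears entirely and is
// replaced by whichever of its two inputs the condition selects.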
4454 // match: (CMOVLGE _ x (FlagEQ)) 4455 // result: x 4456 for { 4457 x := v_1 4458 if v_2.Op != OpAMD64FlagEQ { 4459 break 4460 } 4461 v.copyOf(x) 4462 return true 4463 } 4464 // match: (CMOVLGE _ x (FlagGT_UGT)) 4465 // result: x 4466 for { 4467 x := v_1 4468 if v_2.Op != OpAMD64FlagGT_UGT { 4469 break 4470 } 4471 v.copyOf(x) 4472 return true 4473 } 4474 // match: (CMOVLGE _ x (FlagGT_ULT)) 4475 // result: x 4476 for { 4477 x := v_1 4478 if v_2.Op != OpAMD64FlagGT_ULT { 4479 break 4480 } 4481 v.copyOf(x) 4482 return true 4483 } 4484 // match: (CMOVLGE y _ (FlagLT_ULT)) 4485 // result: y 4486 for { 4487 y := v_0 4488 if v_2.Op != OpAMD64FlagLT_ULT { 4489 break 4490 } 4491 v.copyOf(y) 4492 return true 4493 } 4494 // match: (CMOVLGE y _ (FlagLT_UGT)) 4495 // result: y 4496 for { 4497 y := v_0 4498 if v_2.Op != OpAMD64FlagLT_UGT { 4499 break 4500 } 4501 v.copyOf(y) 4502 return true 4503 } 4504 return false 4505 } 4506 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { 4507 v_2 := v.Args[2] 4508 v_1 := v.Args[1] 4509 v_0 := v.Args[0] 4510 // match: (CMOVLGT x y (InvertFlags cond)) 4511 // result: (CMOVLLT x y cond) 4512 for { 4513 x := v_0 4514 y := v_1 4515 if v_2.Op != OpAMD64InvertFlags { 4516 break 4517 } 4518 cond := v_2.Args[0] 4519 v.reset(OpAMD64CMOVLLT) 4520 v.AddArg3(x, y, cond) 4521 return true 4522 } 4523 // match: (CMOVLGT y _ (FlagEQ)) 4524 // result: y 4525 for { 4526 y := v_0 4527 if v_2.Op != OpAMD64FlagEQ { 4528 break 4529 } 4530 v.copyOf(y) 4531 return true 4532 } 4533 // match: (CMOVLGT _ x (FlagGT_UGT)) 4534 // result: x 4535 for { 4536 x := v_1 4537 if v_2.Op != OpAMD64FlagGT_UGT { 4538 break 4539 } 4540 v.copyOf(x) 4541 return true 4542 } 4543 // match: (CMOVLGT _ x (FlagGT_ULT)) 4544 // result: x 4545 for { 4546 x := v_1 4547 if v_2.Op != OpAMD64FlagGT_ULT { 4548 break 4549 } 4550 v.copyOf(x) 4551 return true 4552 } 4553 // match: (CMOVLGT y _ (FlagLT_ULT)) 4554 // result: y 4555 for { 4556 y := v_0 4557 if v_2.Op != OpAMD64FlagLT_ULT { 4558 break 4559 } 4560 v.copyOf(y) 4561 return true 4562 } 4563 // match: (CMOVLGT y _ (FlagLT_UGT)) 4564 // result: y 4565 for { 4566 y := v_0 4567 if v_2.Op != OpAMD64FlagLT_UGT { 4568 break 4569 } 4570 v.copyOf(y) 4571 return true 4572 } 4573 return false 4574 } 4575 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { 4576 v_2 := v.Args[2] 4577 v_1 := v.Args[1] 4578 v_0 := v.Args[0] 4579 // match: (CMOVLHI x y (InvertFlags cond)) 4580 // result: (CMOVLCS x y cond) 4581 for { 4582 x := v_0 4583 y := v_1 4584 if v_2.Op != OpAMD64InvertFlags { 4585 break 4586 } 4587 cond := v_2.Args[0] 4588 v.reset(OpAMD64CMOVLCS) 4589 v.AddArg3(x, y, cond) 4590 return true 4591 } 4592 // match: (CMOVLHI y _ (FlagEQ)) 4593 // result: y 4594 for { 4595 y := v_0 4596 if v_2.Op != OpAMD64FlagEQ { 4597 break 4598 } 4599 v.copyOf(y) 4600 return true 4601 } 4602 // match: (CMOVLHI _ x (FlagGT_UGT)) 4603 // result: x 4604 for { 4605 x := v_1 4606 if v_2.Op != OpAMD64FlagGT_UGT { 4607 break 4608 } 4609 v.copyOf(x) 4610 return true 4611 } 4612 // match: (CMOVLHI y _ (FlagGT_ULT)) 4613 // result: y 4614 for { 4615 y := v_0 4616 if v_2.Op != OpAMD64FlagGT_ULT { 4617 break 4618 } 4619 v.copyOf(y) 4620 return true 4621 } 4622 // match: (CMOVLHI y _ (FlagLT_ULT)) 4623 // result: y 4624 for { 4625 y := v_0 4626 if v_2.Op != OpAMD64FlagLT_ULT { 4627 break 4628 } 4629 v.copyOf(y) 4630 return true 4631 } 4632 // match: (CMOVLHI _ x (FlagLT_UGT)) 4633 // result: x 4634 for { 4635 x := v_1 4636 if v_2.Op != OpAMD64FlagLT_UGT { 4637 break 4638 } 4639 v.copyOf(x) 
4640 return true 4641 } 4642 return false 4643 } 4644 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { 4645 v_2 := v.Args[2] 4646 v_1 := v.Args[1] 4647 v_0 := v.Args[0] 4648 // match: (CMOVLLE x y (InvertFlags cond)) 4649 // result: (CMOVLGE x y cond) 4650 for { 4651 x := v_0 4652 y := v_1 4653 if v_2.Op != OpAMD64InvertFlags { 4654 break 4655 } 4656 cond := v_2.Args[0] 4657 v.reset(OpAMD64CMOVLGE) 4658 v.AddArg3(x, y, cond) 4659 return true 4660 } 4661 // match: (CMOVLLE _ x (FlagEQ)) 4662 // result: x 4663 for { 4664 x := v_1 4665 if v_2.Op != OpAMD64FlagEQ { 4666 break 4667 } 4668 v.copyOf(x) 4669 return true 4670 } 4671 // match: (CMOVLLE y _ (FlagGT_UGT)) 4672 // result: y 4673 for { 4674 y := v_0 4675 if v_2.Op != OpAMD64FlagGT_UGT { 4676 break 4677 } 4678 v.copyOf(y) 4679 return true 4680 } 4681 // match: (CMOVLLE y _ (FlagGT_ULT)) 4682 // result: y 4683 for { 4684 y := v_0 4685 if v_2.Op != OpAMD64FlagGT_ULT { 4686 break 4687 } 4688 v.copyOf(y) 4689 return true 4690 } 4691 // match: (CMOVLLE _ x (FlagLT_ULT)) 4692 // result: x 4693 for { 4694 x := v_1 4695 if v_2.Op != OpAMD64FlagLT_ULT { 4696 break 4697 } 4698 v.copyOf(x) 4699 return true 4700 } 4701 // match: (CMOVLLE _ x (FlagLT_UGT)) 4702 // result: x 4703 for { 4704 x := v_1 4705 if v_2.Op != OpAMD64FlagLT_UGT { 4706 break 4707 } 4708 v.copyOf(x) 4709 return true 4710 } 4711 return false 4712 } 4713 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { 4714 v_2 := v.Args[2] 4715 v_1 := v.Args[1] 4716 v_0 := v.Args[0] 4717 // match: (CMOVLLS x y (InvertFlags cond)) 4718 // result: (CMOVLCC x y cond) 4719 for { 4720 x := v_0 4721 y := v_1 4722 if v_2.Op != OpAMD64InvertFlags { 4723 break 4724 } 4725 cond := v_2.Args[0] 4726 v.reset(OpAMD64CMOVLCC) 4727 v.AddArg3(x, y, cond) 4728 return true 4729 } 4730 // match: (CMOVLLS _ x (FlagEQ)) 4731 // result: x 4732 for { 4733 x := v_1 4734 if v_2.Op != OpAMD64FlagEQ { 4735 break 4736 } 4737 v.copyOf(x) 4738 return true 4739 } 4740 // match: (CMOVLLS y _ (FlagGT_UGT)) 4741 // result: y 4742 for { 4743 y := v_0 4744 if v_2.Op != OpAMD64FlagGT_UGT { 4745 break 4746 } 4747 v.copyOf(y) 4748 return true 4749 } 4750 // match: (CMOVLLS _ x (FlagGT_ULT)) 4751 // result: x 4752 for { 4753 x := v_1 4754 if v_2.Op != OpAMD64FlagGT_ULT { 4755 break 4756 } 4757 v.copyOf(x) 4758 return true 4759 } 4760 // match: (CMOVLLS _ x (FlagLT_ULT)) 4761 // result: x 4762 for { 4763 x := v_1 4764 if v_2.Op != OpAMD64FlagLT_ULT { 4765 break 4766 } 4767 v.copyOf(x) 4768 return true 4769 } 4770 // match: (CMOVLLS y _ (FlagLT_UGT)) 4771 // result: y 4772 for { 4773 y := v_0 4774 if v_2.Op != OpAMD64FlagLT_UGT { 4775 break 4776 } 4777 v.copyOf(y) 4778 return true 4779 } 4780 return false 4781 } 4782 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { 4783 v_2 := v.Args[2] 4784 v_1 := v.Args[1] 4785 v_0 := v.Args[0] 4786 // match: (CMOVLLT x y (InvertFlags cond)) 4787 // result: (CMOVLGT x y cond) 4788 for { 4789 x := v_0 4790 y := v_1 4791 if v_2.Op != OpAMD64InvertFlags { 4792 break 4793 } 4794 cond := v_2.Args[0] 4795 v.reset(OpAMD64CMOVLGT) 4796 v.AddArg3(x, y, cond) 4797 return true 4798 } 4799 // match: (CMOVLLT y _ (FlagEQ)) 4800 // result: y 4801 for { 4802 y := v_0 4803 if v_2.Op != OpAMD64FlagEQ { 4804 break 4805 } 4806 v.copyOf(y) 4807 return true 4808 } 4809 // match: (CMOVLLT y _ (FlagGT_UGT)) 4810 // result: y 4811 for { 4812 y := v_0 4813 if v_2.Op != OpAMD64FlagGT_UGT { 4814 break 4815 } 4816 v.copyOf(y) 4817 return true 4818 } 4819 // match: (CMOVLLT y _ (FlagGT_ULT)) 4820 // result: y 4821 
for { 4822 y := v_0 4823 if v_2.Op != OpAMD64FlagGT_ULT { 4824 break 4825 } 4826 v.copyOf(y) 4827 return true 4828 } 4829 // match: (CMOVLLT _ x (FlagLT_ULT)) 4830 // result: x 4831 for { 4832 x := v_1 4833 if v_2.Op != OpAMD64FlagLT_ULT { 4834 break 4835 } 4836 v.copyOf(x) 4837 return true 4838 } 4839 // match: (CMOVLLT _ x (FlagLT_UGT)) 4840 // result: x 4841 for { 4842 x := v_1 4843 if v_2.Op != OpAMD64FlagLT_UGT { 4844 break 4845 } 4846 v.copyOf(x) 4847 return true 4848 } 4849 return false 4850 } 4851 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { 4852 v_2 := v.Args[2] 4853 v_1 := v.Args[1] 4854 v_0 := v.Args[0] 4855 // match: (CMOVLNE x y (InvertFlags cond)) 4856 // result: (CMOVLNE x y cond) 4857 for { 4858 x := v_0 4859 y := v_1 4860 if v_2.Op != OpAMD64InvertFlags { 4861 break 4862 } 4863 cond := v_2.Args[0] 4864 v.reset(OpAMD64CMOVLNE) 4865 v.AddArg3(x, y, cond) 4866 return true 4867 } 4868 // match: (CMOVLNE y _ (FlagEQ)) 4869 // result: y 4870 for { 4871 y := v_0 4872 if v_2.Op != OpAMD64FlagEQ { 4873 break 4874 } 4875 v.copyOf(y) 4876 return true 4877 } 4878 // match: (CMOVLNE _ x (FlagGT_UGT)) 4879 // result: x 4880 for { 4881 x := v_1 4882 if v_2.Op != OpAMD64FlagGT_UGT { 4883 break 4884 } 4885 v.copyOf(x) 4886 return true 4887 } 4888 // match: (CMOVLNE _ x (FlagGT_ULT)) 4889 // result: x 4890 for { 4891 x := v_1 4892 if v_2.Op != OpAMD64FlagGT_ULT { 4893 break 4894 } 4895 v.copyOf(x) 4896 return true 4897 } 4898 // match: (CMOVLNE _ x (FlagLT_ULT)) 4899 // result: x 4900 for { 4901 x := v_1 4902 if v_2.Op != OpAMD64FlagLT_ULT { 4903 break 4904 } 4905 v.copyOf(x) 4906 return true 4907 } 4908 // match: (CMOVLNE _ x (FlagLT_UGT)) 4909 // result: x 4910 for { 4911 x := v_1 4912 if v_2.Op != OpAMD64FlagLT_UGT { 4913 break 4914 } 4915 v.copyOf(x) 4916 return true 4917 } 4918 return false 4919 } 4920 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { 4921 v_2 := v.Args[2] 4922 v_1 := v.Args[1] 4923 v_0 := v.Args[0] 4924 // match: (CMOVQCC x y (InvertFlags cond)) 4925 // result: (CMOVQLS x y cond) 4926 for { 4927 x := v_0 4928 y := v_1 4929 if v_2.Op != OpAMD64InvertFlags { 4930 break 4931 } 4932 cond := v_2.Args[0] 4933 v.reset(OpAMD64CMOVQLS) 4934 v.AddArg3(x, y, cond) 4935 return true 4936 } 4937 // match: (CMOVQCC _ x (FlagEQ)) 4938 // result: x 4939 for { 4940 x := v_1 4941 if v_2.Op != OpAMD64FlagEQ { 4942 break 4943 } 4944 v.copyOf(x) 4945 return true 4946 } 4947 // match: (CMOVQCC _ x (FlagGT_UGT)) 4948 // result: x 4949 for { 4950 x := v_1 4951 if v_2.Op != OpAMD64FlagGT_UGT { 4952 break 4953 } 4954 v.copyOf(x) 4955 return true 4956 } 4957 // match: (CMOVQCC y _ (FlagGT_ULT)) 4958 // result: y 4959 for { 4960 y := v_0 4961 if v_2.Op != OpAMD64FlagGT_ULT { 4962 break 4963 } 4964 v.copyOf(y) 4965 return true 4966 } 4967 // match: (CMOVQCC y _ (FlagLT_ULT)) 4968 // result: y 4969 for { 4970 y := v_0 4971 if v_2.Op != OpAMD64FlagLT_ULT { 4972 break 4973 } 4974 v.copyOf(y) 4975 return true 4976 } 4977 // match: (CMOVQCC _ x (FlagLT_UGT)) 4978 // result: x 4979 for { 4980 x := v_1 4981 if v_2.Op != OpAMD64FlagLT_UGT { 4982 break 4983 } 4984 v.copyOf(x) 4985 return true 4986 } 4987 return false 4988 } 4989 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { 4990 v_2 := v.Args[2] 4991 v_1 := v.Args[1] 4992 v_0 := v.Args[0] 4993 // match: (CMOVQCS x y (InvertFlags cond)) 4994 // result: (CMOVQHI x y cond) 4995 for { 4996 x := v_0 4997 y := v_1 4998 if v_2.Op != OpAMD64InvertFlags { 4999 break 5000 } 5001 cond := v_2.Args[0] 5002 v.reset(OpAMD64CMOVQHI) 5003 
v.AddArg3(x, y, cond) 5004 return true 5005 } 5006 // match: (CMOVQCS y _ (FlagEQ)) 5007 // result: y 5008 for { 5009 y := v_0 5010 if v_2.Op != OpAMD64FlagEQ { 5011 break 5012 } 5013 v.copyOf(y) 5014 return true 5015 } 5016 // match: (CMOVQCS y _ (FlagGT_UGT)) 5017 // result: y 5018 for { 5019 y := v_0 5020 if v_2.Op != OpAMD64FlagGT_UGT { 5021 break 5022 } 5023 v.copyOf(y) 5024 return true 5025 } 5026 // match: (CMOVQCS _ x (FlagGT_ULT)) 5027 // result: x 5028 for { 5029 x := v_1 5030 if v_2.Op != OpAMD64FlagGT_ULT { 5031 break 5032 } 5033 v.copyOf(x) 5034 return true 5035 } 5036 // match: (CMOVQCS _ x (FlagLT_ULT)) 5037 // result: x 5038 for { 5039 x := v_1 5040 if v_2.Op != OpAMD64FlagLT_ULT { 5041 break 5042 } 5043 v.copyOf(x) 5044 return true 5045 } 5046 // match: (CMOVQCS y _ (FlagLT_UGT)) 5047 // result: y 5048 for { 5049 y := v_0 5050 if v_2.Op != OpAMD64FlagLT_UGT { 5051 break 5052 } 5053 v.copyOf(y) 5054 return true 5055 } 5056 return false 5057 } 5058 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { 5059 v_2 := v.Args[2] 5060 v_1 := v.Args[1] 5061 v_0 := v.Args[0] 5062 // match: (CMOVQEQ x y (InvertFlags cond)) 5063 // result: (CMOVQEQ x y cond) 5064 for { 5065 x := v_0 5066 y := v_1 5067 if v_2.Op != OpAMD64InvertFlags { 5068 break 5069 } 5070 cond := v_2.Args[0] 5071 v.reset(OpAMD64CMOVQEQ) 5072 v.AddArg3(x, y, cond) 5073 return true 5074 } 5075 // match: (CMOVQEQ _ x (FlagEQ)) 5076 // result: x 5077 for { 5078 x := v_1 5079 if v_2.Op != OpAMD64FlagEQ { 5080 break 5081 } 5082 v.copyOf(x) 5083 return true 5084 } 5085 // match: (CMOVQEQ y _ (FlagGT_UGT)) 5086 // result: y 5087 for { 5088 y := v_0 5089 if v_2.Op != OpAMD64FlagGT_UGT { 5090 break 5091 } 5092 v.copyOf(y) 5093 return true 5094 } 5095 // match: (CMOVQEQ y _ (FlagGT_ULT)) 5096 // result: y 5097 for { 5098 y := v_0 5099 if v_2.Op != OpAMD64FlagGT_ULT { 5100 break 5101 } 5102 v.copyOf(y) 5103 return true 5104 } 5105 // match: (CMOVQEQ y _ (FlagLT_ULT)) 5106 // result: y 5107 for { 5108 y := v_0 5109 if v_2.Op != OpAMD64FlagLT_ULT { 5110 break 5111 } 5112 v.copyOf(y) 5113 return true 5114 } 5115 // match: (CMOVQEQ y _ (FlagLT_UGT)) 5116 // result: y 5117 for { 5118 y := v_0 5119 if v_2.Op != OpAMD64FlagLT_UGT { 5120 break 5121 } 5122 v.copyOf(y) 5123 return true 5124 } 5125 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) 5126 // cond: c != 0 5127 // result: x 5128 for { 5129 x := v_0 5130 if v_2.Op != OpSelect1 { 5131 break 5132 } 5133 v_2_0 := v_2.Args[0] 5134 if v_2_0.Op != OpAMD64BSFQ { 5135 break 5136 } 5137 v_2_0_0 := v_2_0.Args[0] 5138 if v_2_0_0.Op != OpAMD64ORQconst { 5139 break 5140 } 5141 c := auxIntToInt32(v_2_0_0.AuxInt) 5142 if !(c != 0) { 5143 break 5144 } 5145 v.copyOf(x) 5146 return true 5147 } 5148 // match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _)))) 5149 // cond: c != 0 5150 // result: x 5151 for { 5152 x := v_0 5153 if v_2.Op != OpSelect1 { 5154 break 5155 } 5156 v_2_0 := v_2.Args[0] 5157 if v_2_0.Op != OpAMD64BSRQ { 5158 break 5159 } 5160 v_2_0_0 := v_2_0.Args[0] 5161 if v_2_0_0.Op != OpAMD64ORQconst { 5162 break 5163 } 5164 c := auxIntToInt32(v_2_0_0.AuxInt) 5165 if !(c != 0) { 5166 break 5167 } 5168 v.copyOf(x) 5169 return true 5170 } 5171 return false 5172 } 5173 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { 5174 v_2 := v.Args[2] 5175 v_1 := v.Args[1] 5176 v_0 := v.Args[0] 5177 // match: (CMOVQGE x y (InvertFlags cond)) 5178 // result: (CMOVQLE x y cond) 5179 for { 5180 x := v_0 5181 y := v_1 5182 if v_2.Op != OpAMD64InvertFlags { 5183 break 5184 } 5185 cond := 
v_2.Args[0] 5186 v.reset(OpAMD64CMOVQLE) 5187 v.AddArg3(x, y, cond) 5188 return true 5189 } 5190 // match: (CMOVQGE _ x (FlagEQ)) 5191 // result: x 5192 for { 5193 x := v_1 5194 if v_2.Op != OpAMD64FlagEQ { 5195 break 5196 } 5197 v.copyOf(x) 5198 return true 5199 } 5200 // match: (CMOVQGE _ x (FlagGT_UGT)) 5201 // result: x 5202 for { 5203 x := v_1 5204 if v_2.Op != OpAMD64FlagGT_UGT { 5205 break 5206 } 5207 v.copyOf(x) 5208 return true 5209 } 5210 // match: (CMOVQGE _ x (FlagGT_ULT)) 5211 // result: x 5212 for { 5213 x := v_1 5214 if v_2.Op != OpAMD64FlagGT_ULT { 5215 break 5216 } 5217 v.copyOf(x) 5218 return true 5219 } 5220 // match: (CMOVQGE y _ (FlagLT_ULT)) 5221 // result: y 5222 for { 5223 y := v_0 5224 if v_2.Op != OpAMD64FlagLT_ULT { 5225 break 5226 } 5227 v.copyOf(y) 5228 return true 5229 } 5230 // match: (CMOVQGE y _ (FlagLT_UGT)) 5231 // result: y 5232 for { 5233 y := v_0 5234 if v_2.Op != OpAMD64FlagLT_UGT { 5235 break 5236 } 5237 v.copyOf(y) 5238 return true 5239 } 5240 return false 5241 } 5242 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { 5243 v_2 := v.Args[2] 5244 v_1 := v.Args[1] 5245 v_0 := v.Args[0] 5246 // match: (CMOVQGT x y (InvertFlags cond)) 5247 // result: (CMOVQLT x y cond) 5248 for { 5249 x := v_0 5250 y := v_1 5251 if v_2.Op != OpAMD64InvertFlags { 5252 break 5253 } 5254 cond := v_2.Args[0] 5255 v.reset(OpAMD64CMOVQLT) 5256 v.AddArg3(x, y, cond) 5257 return true 5258 } 5259 // match: (CMOVQGT y _ (FlagEQ)) 5260 // result: y 5261 for { 5262 y := v_0 5263 if v_2.Op != OpAMD64FlagEQ { 5264 break 5265 } 5266 v.copyOf(y) 5267 return true 5268 } 5269 // match: (CMOVQGT _ x (FlagGT_UGT)) 5270 // result: x 5271 for { 5272 x := v_1 5273 if v_2.Op != OpAMD64FlagGT_UGT { 5274 break 5275 } 5276 v.copyOf(x) 5277 return true 5278 } 5279 // match: (CMOVQGT _ x (FlagGT_ULT)) 5280 // result: x 5281 for { 5282 x := v_1 5283 if v_2.Op != OpAMD64FlagGT_ULT { 5284 break 5285 } 5286 v.copyOf(x) 5287 return true 5288 } 5289 // match: (CMOVQGT y _ (FlagLT_ULT)) 5290 // result: y 5291 for { 5292 y := v_0 5293 if v_2.Op != OpAMD64FlagLT_ULT { 5294 break 5295 } 5296 v.copyOf(y) 5297 return true 5298 } 5299 // match: (CMOVQGT y _ (FlagLT_UGT)) 5300 // result: y 5301 for { 5302 y := v_0 5303 if v_2.Op != OpAMD64FlagLT_UGT { 5304 break 5305 } 5306 v.copyOf(y) 5307 return true 5308 } 5309 return false 5310 } 5311 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { 5312 v_2 := v.Args[2] 5313 v_1 := v.Args[1] 5314 v_0 := v.Args[0] 5315 // match: (CMOVQHI x y (InvertFlags cond)) 5316 // result: (CMOVQCS x y cond) 5317 for { 5318 x := v_0 5319 y := v_1 5320 if v_2.Op != OpAMD64InvertFlags { 5321 break 5322 } 5323 cond := v_2.Args[0] 5324 v.reset(OpAMD64CMOVQCS) 5325 v.AddArg3(x, y, cond) 5326 return true 5327 } 5328 // match: (CMOVQHI y _ (FlagEQ)) 5329 // result: y 5330 for { 5331 y := v_0 5332 if v_2.Op != OpAMD64FlagEQ { 5333 break 5334 } 5335 v.copyOf(y) 5336 return true 5337 } 5338 // match: (CMOVQHI _ x (FlagGT_UGT)) 5339 // result: x 5340 for { 5341 x := v_1 5342 if v_2.Op != OpAMD64FlagGT_UGT { 5343 break 5344 } 5345 v.copyOf(x) 5346 return true 5347 } 5348 // match: (CMOVQHI y _ (FlagGT_ULT)) 5349 // result: y 5350 for { 5351 y := v_0 5352 if v_2.Op != OpAMD64FlagGT_ULT { 5353 break 5354 } 5355 v.copyOf(y) 5356 return true 5357 } 5358 // match: (CMOVQHI y _ (FlagLT_ULT)) 5359 // result: y 5360 for { 5361 y := v_0 5362 if v_2.Op != OpAMD64FlagLT_ULT { 5363 break 5364 } 5365 v.copyOf(y) 5366 return true 5367 } 5368 // match: (CMOVQHI _ x (FlagLT_UGT)) 5369 // result: x 5370 for 
{ 5371 x := v_1 5372 if v_2.Op != OpAMD64FlagLT_UGT { 5373 break 5374 } 5375 v.copyOf(x) 5376 return true 5377 } 5378 return false 5379 } 5380 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { 5381 v_2 := v.Args[2] 5382 v_1 := v.Args[1] 5383 v_0 := v.Args[0] 5384 // match: (CMOVQLE x y (InvertFlags cond)) 5385 // result: (CMOVQGE x y cond) 5386 for { 5387 x := v_0 5388 y := v_1 5389 if v_2.Op != OpAMD64InvertFlags { 5390 break 5391 } 5392 cond := v_2.Args[0] 5393 v.reset(OpAMD64CMOVQGE) 5394 v.AddArg3(x, y, cond) 5395 return true 5396 } 5397 // match: (CMOVQLE _ x (FlagEQ)) 5398 // result: x 5399 for { 5400 x := v_1 5401 if v_2.Op != OpAMD64FlagEQ { 5402 break 5403 } 5404 v.copyOf(x) 5405 return true 5406 } 5407 // match: (CMOVQLE y _ (FlagGT_UGT)) 5408 // result: y 5409 for { 5410 y := v_0 5411 if v_2.Op != OpAMD64FlagGT_UGT { 5412 break 5413 } 5414 v.copyOf(y) 5415 return true 5416 } 5417 // match: (CMOVQLE y _ (FlagGT_ULT)) 5418 // result: y 5419 for { 5420 y := v_0 5421 if v_2.Op != OpAMD64FlagGT_ULT { 5422 break 5423 } 5424 v.copyOf(y) 5425 return true 5426 } 5427 // match: (CMOVQLE _ x (FlagLT_ULT)) 5428 // result: x 5429 for { 5430 x := v_1 5431 if v_2.Op != OpAMD64FlagLT_ULT { 5432 break 5433 } 5434 v.copyOf(x) 5435 return true 5436 } 5437 // match: (CMOVQLE _ x (FlagLT_UGT)) 5438 // result: x 5439 for { 5440 x := v_1 5441 if v_2.Op != OpAMD64FlagLT_UGT { 5442 break 5443 } 5444 v.copyOf(x) 5445 return true 5446 } 5447 return false 5448 } 5449 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { 5450 v_2 := v.Args[2] 5451 v_1 := v.Args[1] 5452 v_0 := v.Args[0] 5453 // match: (CMOVQLS x y (InvertFlags cond)) 5454 // result: (CMOVQCC x y cond) 5455 for { 5456 x := v_0 5457 y := v_1 5458 if v_2.Op != OpAMD64InvertFlags { 5459 break 5460 } 5461 cond := v_2.Args[0] 5462 v.reset(OpAMD64CMOVQCC) 5463 v.AddArg3(x, y, cond) 5464 return true 5465 } 5466 // match: (CMOVQLS _ x (FlagEQ)) 5467 // result: x 5468 for { 5469 x := v_1 5470 if v_2.Op != OpAMD64FlagEQ { 5471 break 5472 } 5473 v.copyOf(x) 5474 return true 5475 } 5476 // match: (CMOVQLS y _ (FlagGT_UGT)) 5477 // result: y 5478 for { 5479 y := v_0 5480 if v_2.Op != OpAMD64FlagGT_UGT { 5481 break 5482 } 5483 v.copyOf(y) 5484 return true 5485 } 5486 // match: (CMOVQLS _ x (FlagGT_ULT)) 5487 // result: x 5488 for { 5489 x := v_1 5490 if v_2.Op != OpAMD64FlagGT_ULT { 5491 break 5492 } 5493 v.copyOf(x) 5494 return true 5495 } 5496 // match: (CMOVQLS _ x (FlagLT_ULT)) 5497 // result: x 5498 for { 5499 x := v_1 5500 if v_2.Op != OpAMD64FlagLT_ULT { 5501 break 5502 } 5503 v.copyOf(x) 5504 return true 5505 } 5506 // match: (CMOVQLS y _ (FlagLT_UGT)) 5507 // result: y 5508 for { 5509 y := v_0 5510 if v_2.Op != OpAMD64FlagLT_UGT { 5511 break 5512 } 5513 v.copyOf(y) 5514 return true 5515 } 5516 return false 5517 } 5518 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { 5519 v_2 := v.Args[2] 5520 v_1 := v.Args[1] 5521 v_0 := v.Args[0] 5522 // match: (CMOVQLT x y (InvertFlags cond)) 5523 // result: (CMOVQGT x y cond) 5524 for { 5525 x := v_0 5526 y := v_1 5527 if v_2.Op != OpAMD64InvertFlags { 5528 break 5529 } 5530 cond := v_2.Args[0] 5531 v.reset(OpAMD64CMOVQGT) 5532 v.AddArg3(x, y, cond) 5533 return true 5534 } 5535 // match: (CMOVQLT y _ (FlagEQ)) 5536 // result: y 5537 for { 5538 y := v_0 5539 if v_2.Op != OpAMD64FlagEQ { 5540 break 5541 } 5542 v.copyOf(y) 5543 return true 5544 } 5545 // match: (CMOVQLT y _ (FlagGT_UGT)) 5546 // result: y 5547 for { 5548 y := v_0 5549 if v_2.Op != OpAMD64FlagGT_UGT { 5550 break 5551 } 5552 v.copyOf(y) 
5553 return true 5554 } 5555 // match: (CMOVQLT y _ (FlagGT_ULT)) 5556 // result: y 5557 for { 5558 y := v_0 5559 if v_2.Op != OpAMD64FlagGT_ULT { 5560 break 5561 } 5562 v.copyOf(y) 5563 return true 5564 } 5565 // match: (CMOVQLT _ x (FlagLT_ULT)) 5566 // result: x 5567 for { 5568 x := v_1 5569 if v_2.Op != OpAMD64FlagLT_ULT { 5570 break 5571 } 5572 v.copyOf(x) 5573 return true 5574 } 5575 // match: (CMOVQLT _ x (FlagLT_UGT)) 5576 // result: x 5577 for { 5578 x := v_1 5579 if v_2.Op != OpAMD64FlagLT_UGT { 5580 break 5581 } 5582 v.copyOf(x) 5583 return true 5584 } 5585 return false 5586 } 5587 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { 5588 v_2 := v.Args[2] 5589 v_1 := v.Args[1] 5590 v_0 := v.Args[0] 5591 // match: (CMOVQNE x y (InvertFlags cond)) 5592 // result: (CMOVQNE x y cond) 5593 for { 5594 x := v_0 5595 y := v_1 5596 if v_2.Op != OpAMD64InvertFlags { 5597 break 5598 } 5599 cond := v_2.Args[0] 5600 v.reset(OpAMD64CMOVQNE) 5601 v.AddArg3(x, y, cond) 5602 return true 5603 } 5604 // match: (CMOVQNE y _ (FlagEQ)) 5605 // result: y 5606 for { 5607 y := v_0 5608 if v_2.Op != OpAMD64FlagEQ { 5609 break 5610 } 5611 v.copyOf(y) 5612 return true 5613 } 5614 // match: (CMOVQNE _ x (FlagGT_UGT)) 5615 // result: x 5616 for { 5617 x := v_1 5618 if v_2.Op != OpAMD64FlagGT_UGT { 5619 break 5620 } 5621 v.copyOf(x) 5622 return true 5623 } 5624 // match: (CMOVQNE _ x (FlagGT_ULT)) 5625 // result: x 5626 for { 5627 x := v_1 5628 if v_2.Op != OpAMD64FlagGT_ULT { 5629 break 5630 } 5631 v.copyOf(x) 5632 return true 5633 } 5634 // match: (CMOVQNE _ x (FlagLT_ULT)) 5635 // result: x 5636 for { 5637 x := v_1 5638 if v_2.Op != OpAMD64FlagLT_ULT { 5639 break 5640 } 5641 v.copyOf(x) 5642 return true 5643 } 5644 // match: (CMOVQNE _ x (FlagLT_UGT)) 5645 // result: x 5646 for { 5647 x := v_1 5648 if v_2.Op != OpAMD64FlagLT_UGT { 5649 break 5650 } 5651 v.copyOf(x) 5652 return true 5653 } 5654 return false 5655 } 5656 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { 5657 v_2 := v.Args[2] 5658 v_1 := v.Args[1] 5659 v_0 := v.Args[0] 5660 // match: (CMOVWCC x y (InvertFlags cond)) 5661 // result: (CMOVWLS x y cond) 5662 for { 5663 x := v_0 5664 y := v_1 5665 if v_2.Op != OpAMD64InvertFlags { 5666 break 5667 } 5668 cond := v_2.Args[0] 5669 v.reset(OpAMD64CMOVWLS) 5670 v.AddArg3(x, y, cond) 5671 return true 5672 } 5673 // match: (CMOVWCC _ x (FlagEQ)) 5674 // result: x 5675 for { 5676 x := v_1 5677 if v_2.Op != OpAMD64FlagEQ { 5678 break 5679 } 5680 v.copyOf(x) 5681 return true 5682 } 5683 // match: (CMOVWCC _ x (FlagGT_UGT)) 5684 // result: x 5685 for { 5686 x := v_1 5687 if v_2.Op != OpAMD64FlagGT_UGT { 5688 break 5689 } 5690 v.copyOf(x) 5691 return true 5692 } 5693 // match: (CMOVWCC y _ (FlagGT_ULT)) 5694 // result: y 5695 for { 5696 y := v_0 5697 if v_2.Op != OpAMD64FlagGT_ULT { 5698 break 5699 } 5700 v.copyOf(y) 5701 return true 5702 } 5703 // match: (CMOVWCC y _ (FlagLT_ULT)) 5704 // result: y 5705 for { 5706 y := v_0 5707 if v_2.Op != OpAMD64FlagLT_ULT { 5708 break 5709 } 5710 v.copyOf(y) 5711 return true 5712 } 5713 // match: (CMOVWCC _ x (FlagLT_UGT)) 5714 // result: x 5715 for { 5716 x := v_1 5717 if v_2.Op != OpAMD64FlagLT_UGT { 5718 break 5719 } 5720 v.copyOf(x) 5721 return true 5722 } 5723 return false 5724 } 5725 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { 5726 v_2 := v.Args[2] 5727 v_1 := v.Args[1] 5728 v_0 := v.Args[0] 5729 // match: (CMOVWCS x y (InvertFlags cond)) 5730 // result: (CMOVWHI x y cond) 5731 for { 5732 x := v_0 5733 y := v_1 5734 if v_2.Op != OpAMD64InvertFlags 
{ 5735 break 5736 } 5737 cond := v_2.Args[0] 5738 v.reset(OpAMD64CMOVWHI) 5739 v.AddArg3(x, y, cond) 5740 return true 5741 } 5742 // match: (CMOVWCS y _ (FlagEQ)) 5743 // result: y 5744 for { 5745 y := v_0 5746 if v_2.Op != OpAMD64FlagEQ { 5747 break 5748 } 5749 v.copyOf(y) 5750 return true 5751 } 5752 // match: (CMOVWCS y _ (FlagGT_UGT)) 5753 // result: y 5754 for { 5755 y := v_0 5756 if v_2.Op != OpAMD64FlagGT_UGT { 5757 break 5758 } 5759 v.copyOf(y) 5760 return true 5761 } 5762 // match: (CMOVWCS _ x (FlagGT_ULT)) 5763 // result: x 5764 for { 5765 x := v_1 5766 if v_2.Op != OpAMD64FlagGT_ULT { 5767 break 5768 } 5769 v.copyOf(x) 5770 return true 5771 } 5772 // match: (CMOVWCS _ x (FlagLT_ULT)) 5773 // result: x 5774 for { 5775 x := v_1 5776 if v_2.Op != OpAMD64FlagLT_ULT { 5777 break 5778 } 5779 v.copyOf(x) 5780 return true 5781 } 5782 // match: (CMOVWCS y _ (FlagLT_UGT)) 5783 // result: y 5784 for { 5785 y := v_0 5786 if v_2.Op != OpAMD64FlagLT_UGT { 5787 break 5788 } 5789 v.copyOf(y) 5790 return true 5791 } 5792 return false 5793 } 5794 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { 5795 v_2 := v.Args[2] 5796 v_1 := v.Args[1] 5797 v_0 := v.Args[0] 5798 // match: (CMOVWEQ x y (InvertFlags cond)) 5799 // result: (CMOVWEQ x y cond) 5800 for { 5801 x := v_0 5802 y := v_1 5803 if v_2.Op != OpAMD64InvertFlags { 5804 break 5805 } 5806 cond := v_2.Args[0] 5807 v.reset(OpAMD64CMOVWEQ) 5808 v.AddArg3(x, y, cond) 5809 return true 5810 } 5811 // match: (CMOVWEQ _ x (FlagEQ)) 5812 // result: x 5813 for { 5814 x := v_1 5815 if v_2.Op != OpAMD64FlagEQ { 5816 break 5817 } 5818 v.copyOf(x) 5819 return true 5820 } 5821 // match: (CMOVWEQ y _ (FlagGT_UGT)) 5822 // result: y 5823 for { 5824 y := v_0 5825 if v_2.Op != OpAMD64FlagGT_UGT { 5826 break 5827 } 5828 v.copyOf(y) 5829 return true 5830 } 5831 // match: (CMOVWEQ y _ (FlagGT_ULT)) 5832 // result: y 5833 for { 5834 y := v_0 5835 if v_2.Op != OpAMD64FlagGT_ULT { 5836 break 5837 } 5838 v.copyOf(y) 5839 return true 5840 } 5841 // match: (CMOVWEQ y _ (FlagLT_ULT)) 5842 // result: y 5843 for { 5844 y := v_0 5845 if v_2.Op != OpAMD64FlagLT_ULT { 5846 break 5847 } 5848 v.copyOf(y) 5849 return true 5850 } 5851 // match: (CMOVWEQ y _ (FlagLT_UGT)) 5852 // result: y 5853 for { 5854 y := v_0 5855 if v_2.Op != OpAMD64FlagLT_UGT { 5856 break 5857 } 5858 v.copyOf(y) 5859 return true 5860 } 5861 return false 5862 } 5863 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { 5864 v_2 := v.Args[2] 5865 v_1 := v.Args[1] 5866 v_0 := v.Args[0] 5867 // match: (CMOVWGE x y (InvertFlags cond)) 5868 // result: (CMOVWLE x y cond) 5869 for { 5870 x := v_0 5871 y := v_1 5872 if v_2.Op != OpAMD64InvertFlags { 5873 break 5874 } 5875 cond := v_2.Args[0] 5876 v.reset(OpAMD64CMOVWLE) 5877 v.AddArg3(x, y, cond) 5878 return true 5879 } 5880 // match: (CMOVWGE _ x (FlagEQ)) 5881 // result: x 5882 for { 5883 x := v_1 5884 if v_2.Op != OpAMD64FlagEQ { 5885 break 5886 } 5887 v.copyOf(x) 5888 return true 5889 } 5890 // match: (CMOVWGE _ x (FlagGT_UGT)) 5891 // result: x 5892 for { 5893 x := v_1 5894 if v_2.Op != OpAMD64FlagGT_UGT { 5895 break 5896 } 5897 v.copyOf(x) 5898 return true 5899 } 5900 // match: (CMOVWGE _ x (FlagGT_ULT)) 5901 // result: x 5902 for { 5903 x := v_1 5904 if v_2.Op != OpAMD64FlagGT_ULT { 5905 break 5906 } 5907 v.copyOf(x) 5908 return true 5909 } 5910 // match: (CMOVWGE y _ (FlagLT_ULT)) 5911 // result: y 5912 for { 5913 y := v_0 5914 if v_2.Op != OpAMD64FlagLT_ULT { 5915 break 5916 } 5917 v.copyOf(y) 5918 return true 5919 } 5920 // match: (CMOVWGE y _ 
(FlagLT_UGT)) 5921 // result: y 5922 for { 5923 y := v_0 5924 if v_2.Op != OpAMD64FlagLT_UGT { 5925 break 5926 } 5927 v.copyOf(y) 5928 return true 5929 } 5930 return false 5931 } 5932 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { 5933 v_2 := v.Args[2] 5934 v_1 := v.Args[1] 5935 v_0 := v.Args[0] 5936 // match: (CMOVWGT x y (InvertFlags cond)) 5937 // result: (CMOVWLT x y cond) 5938 for { 5939 x := v_0 5940 y := v_1 5941 if v_2.Op != OpAMD64InvertFlags { 5942 break 5943 } 5944 cond := v_2.Args[0] 5945 v.reset(OpAMD64CMOVWLT) 5946 v.AddArg3(x, y, cond) 5947 return true 5948 } 5949 // match: (CMOVWGT y _ (FlagEQ)) 5950 // result: y 5951 for { 5952 y := v_0 5953 if v_2.Op != OpAMD64FlagEQ { 5954 break 5955 } 5956 v.copyOf(y) 5957 return true 5958 } 5959 // match: (CMOVWGT _ x (FlagGT_UGT)) 5960 // result: x 5961 for { 5962 x := v_1 5963 if v_2.Op != OpAMD64FlagGT_UGT { 5964 break 5965 } 5966 v.copyOf(x) 5967 return true 5968 } 5969 // match: (CMOVWGT _ x (FlagGT_ULT)) 5970 // result: x 5971 for { 5972 x := v_1 5973 if v_2.Op != OpAMD64FlagGT_ULT { 5974 break 5975 } 5976 v.copyOf(x) 5977 return true 5978 } 5979 // match: (CMOVWGT y _ (FlagLT_ULT)) 5980 // result: y 5981 for { 5982 y := v_0 5983 if v_2.Op != OpAMD64FlagLT_ULT { 5984 break 5985 } 5986 v.copyOf(y) 5987 return true 5988 } 5989 // match: (CMOVWGT y _ (FlagLT_UGT)) 5990 // result: y 5991 for { 5992 y := v_0 5993 if v_2.Op != OpAMD64FlagLT_UGT { 5994 break 5995 } 5996 v.copyOf(y) 5997 return true 5998 } 5999 return false 6000 } 6001 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { 6002 v_2 := v.Args[2] 6003 v_1 := v.Args[1] 6004 v_0 := v.Args[0] 6005 // match: (CMOVWHI x y (InvertFlags cond)) 6006 // result: (CMOVWCS x y cond) 6007 for { 6008 x := v_0 6009 y := v_1 6010 if v_2.Op != OpAMD64InvertFlags { 6011 break 6012 } 6013 cond := v_2.Args[0] 6014 v.reset(OpAMD64CMOVWCS) 6015 v.AddArg3(x, y, cond) 6016 return true 6017 } 6018 // match: (CMOVWHI y _ (FlagEQ)) 6019 // result: y 6020 for { 6021 y := v_0 6022 if v_2.Op != OpAMD64FlagEQ { 6023 break 6024 } 6025 v.copyOf(y) 6026 return true 6027 } 6028 // match: (CMOVWHI _ x (FlagGT_UGT)) 6029 // result: x 6030 for { 6031 x := v_1 6032 if v_2.Op != OpAMD64FlagGT_UGT { 6033 break 6034 } 6035 v.copyOf(x) 6036 return true 6037 } 6038 // match: (CMOVWHI y _ (FlagGT_ULT)) 6039 // result: y 6040 for { 6041 y := v_0 6042 if v_2.Op != OpAMD64FlagGT_ULT { 6043 break 6044 } 6045 v.copyOf(y) 6046 return true 6047 } 6048 // match: (CMOVWHI y _ (FlagLT_ULT)) 6049 // result: y 6050 for { 6051 y := v_0 6052 if v_2.Op != OpAMD64FlagLT_ULT { 6053 break 6054 } 6055 v.copyOf(y) 6056 return true 6057 } 6058 // match: (CMOVWHI _ x (FlagLT_UGT)) 6059 // result: x 6060 for { 6061 x := v_1 6062 if v_2.Op != OpAMD64FlagLT_UGT { 6063 break 6064 } 6065 v.copyOf(x) 6066 return true 6067 } 6068 return false 6069 } 6070 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { 6071 v_2 := v.Args[2] 6072 v_1 := v.Args[1] 6073 v_0 := v.Args[0] 6074 // match: (CMOVWLE x y (InvertFlags cond)) 6075 // result: (CMOVWGE x y cond) 6076 for { 6077 x := v_0 6078 y := v_1 6079 if v_2.Op != OpAMD64InvertFlags { 6080 break 6081 } 6082 cond := v_2.Args[0] 6083 v.reset(OpAMD64CMOVWGE) 6084 v.AddArg3(x, y, cond) 6085 return true 6086 } 6087 // match: (CMOVWLE _ x (FlagEQ)) 6088 // result: x 6089 for { 6090 x := v_1 6091 if v_2.Op != OpAMD64FlagEQ { 6092 break 6093 } 6094 v.copyOf(x) 6095 return true 6096 } 6097 // match: (CMOVWLE y _ (FlagGT_UGT)) 6098 // result: y 6099 for { 6100 y := v_0 6101 if v_2.Op != 
OpAMD64FlagGT_UGT { 6102 break 6103 } 6104 v.copyOf(y) 6105 return true 6106 } 6107 // match: (CMOVWLE y _ (FlagGT_ULT)) 6108 // result: y 6109 for { 6110 y := v_0 6111 if v_2.Op != OpAMD64FlagGT_ULT { 6112 break 6113 } 6114 v.copyOf(y) 6115 return true 6116 } 6117 // match: (CMOVWLE _ x (FlagLT_ULT)) 6118 // result: x 6119 for { 6120 x := v_1 6121 if v_2.Op != OpAMD64FlagLT_ULT { 6122 break 6123 } 6124 v.copyOf(x) 6125 return true 6126 } 6127 // match: (CMOVWLE _ x (FlagLT_UGT)) 6128 // result: x 6129 for { 6130 x := v_1 6131 if v_2.Op != OpAMD64FlagLT_UGT { 6132 break 6133 } 6134 v.copyOf(x) 6135 return true 6136 } 6137 return false 6138 } 6139 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { 6140 v_2 := v.Args[2] 6141 v_1 := v.Args[1] 6142 v_0 := v.Args[0] 6143 // match: (CMOVWLS x y (InvertFlags cond)) 6144 // result: (CMOVWCC x y cond) 6145 for { 6146 x := v_0 6147 y := v_1 6148 if v_2.Op != OpAMD64InvertFlags { 6149 break 6150 } 6151 cond := v_2.Args[0] 6152 v.reset(OpAMD64CMOVWCC) 6153 v.AddArg3(x, y, cond) 6154 return true 6155 } 6156 // match: (CMOVWLS _ x (FlagEQ)) 6157 // result: x 6158 for { 6159 x := v_1 6160 if v_2.Op != OpAMD64FlagEQ { 6161 break 6162 } 6163 v.copyOf(x) 6164 return true 6165 } 6166 // match: (CMOVWLS y _ (FlagGT_UGT)) 6167 // result: y 6168 for { 6169 y := v_0 6170 if v_2.Op != OpAMD64FlagGT_UGT { 6171 break 6172 } 6173 v.copyOf(y) 6174 return true 6175 } 6176 // match: (CMOVWLS _ x (FlagGT_ULT)) 6177 // result: x 6178 for { 6179 x := v_1 6180 if v_2.Op != OpAMD64FlagGT_ULT { 6181 break 6182 } 6183 v.copyOf(x) 6184 return true 6185 } 6186 // match: (CMOVWLS _ x (FlagLT_ULT)) 6187 // result: x 6188 for { 6189 x := v_1 6190 if v_2.Op != OpAMD64FlagLT_ULT { 6191 break 6192 } 6193 v.copyOf(x) 6194 return true 6195 } 6196 // match: (CMOVWLS y _ (FlagLT_UGT)) 6197 // result: y 6198 for { 6199 y := v_0 6200 if v_2.Op != OpAMD64FlagLT_UGT { 6201 break 6202 } 6203 v.copyOf(y) 6204 return true 6205 } 6206 return false 6207 } 6208 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { 6209 v_2 := v.Args[2] 6210 v_1 := v.Args[1] 6211 v_0 := v.Args[0] 6212 // match: (CMOVWLT x y (InvertFlags cond)) 6213 // result: (CMOVWGT x y cond) 6214 for { 6215 x := v_0 6216 y := v_1 6217 if v_2.Op != OpAMD64InvertFlags { 6218 break 6219 } 6220 cond := v_2.Args[0] 6221 v.reset(OpAMD64CMOVWGT) 6222 v.AddArg3(x, y, cond) 6223 return true 6224 } 6225 // match: (CMOVWLT y _ (FlagEQ)) 6226 // result: y 6227 for { 6228 y := v_0 6229 if v_2.Op != OpAMD64FlagEQ { 6230 break 6231 } 6232 v.copyOf(y) 6233 return true 6234 } 6235 // match: (CMOVWLT y _ (FlagGT_UGT)) 6236 // result: y 6237 for { 6238 y := v_0 6239 if v_2.Op != OpAMD64FlagGT_UGT { 6240 break 6241 } 6242 v.copyOf(y) 6243 return true 6244 } 6245 // match: (CMOVWLT y _ (FlagGT_ULT)) 6246 // result: y 6247 for { 6248 y := v_0 6249 if v_2.Op != OpAMD64FlagGT_ULT { 6250 break 6251 } 6252 v.copyOf(y) 6253 return true 6254 } 6255 // match: (CMOVWLT _ x (FlagLT_ULT)) 6256 // result: x 6257 for { 6258 x := v_1 6259 if v_2.Op != OpAMD64FlagLT_ULT { 6260 break 6261 } 6262 v.copyOf(x) 6263 return true 6264 } 6265 // match: (CMOVWLT _ x (FlagLT_UGT)) 6266 // result: x 6267 for { 6268 x := v_1 6269 if v_2.Op != OpAMD64FlagLT_UGT { 6270 break 6271 } 6272 v.copyOf(x) 6273 return true 6274 } 6275 return false 6276 } 6277 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { 6278 v_2 := v.Args[2] 6279 v_1 := v.Args[1] 6280 v_0 := v.Args[0] 6281 // match: (CMOVWNE x y (InvertFlags cond)) 6282 // result: (CMOVWNE x y cond) 6283 for { 6284 x := 
v_0 6285 y := v_1 6286 if v_2.Op != OpAMD64InvertFlags { 6287 break 6288 } 6289 cond := v_2.Args[0] 6290 v.reset(OpAMD64CMOVWNE) 6291 v.AddArg3(x, y, cond) 6292 return true 6293 } 6294 // match: (CMOVWNE y _ (FlagEQ)) 6295 // result: y 6296 for { 6297 y := v_0 6298 if v_2.Op != OpAMD64FlagEQ { 6299 break 6300 } 6301 v.copyOf(y) 6302 return true 6303 } 6304 // match: (CMOVWNE _ x (FlagGT_UGT)) 6305 // result: x 6306 for { 6307 x := v_1 6308 if v_2.Op != OpAMD64FlagGT_UGT { 6309 break 6310 } 6311 v.copyOf(x) 6312 return true 6313 } 6314 // match: (CMOVWNE _ x (FlagGT_ULT)) 6315 // result: x 6316 for { 6317 x := v_1 6318 if v_2.Op != OpAMD64FlagGT_ULT { 6319 break 6320 } 6321 v.copyOf(x) 6322 return true 6323 } 6324 // match: (CMOVWNE _ x (FlagLT_ULT)) 6325 // result: x 6326 for { 6327 x := v_1 6328 if v_2.Op != OpAMD64FlagLT_ULT { 6329 break 6330 } 6331 v.copyOf(x) 6332 return true 6333 } 6334 // match: (CMOVWNE _ x (FlagLT_UGT)) 6335 // result: x 6336 for { 6337 x := v_1 6338 if v_2.Op != OpAMD64FlagLT_UGT { 6339 break 6340 } 6341 v.copyOf(x) 6342 return true 6343 } 6344 return false 6345 } 6346 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { 6347 v_1 := v.Args[1] 6348 v_0 := v.Args[0] 6349 b := v.Block 6350 // match: (CMPB x (MOVLconst [c])) 6351 // result: (CMPBconst x [int8(c)]) 6352 for { 6353 x := v_0 6354 if v_1.Op != OpAMD64MOVLconst { 6355 break 6356 } 6357 c := auxIntToInt32(v_1.AuxInt) 6358 v.reset(OpAMD64CMPBconst) 6359 v.AuxInt = int8ToAuxInt(int8(c)) 6360 v.AddArg(x) 6361 return true 6362 } 6363 // match: (CMPB (MOVLconst [c]) x) 6364 // result: (InvertFlags (CMPBconst x [int8(c)])) 6365 for { 6366 if v_0.Op != OpAMD64MOVLconst { 6367 break 6368 } 6369 c := auxIntToInt32(v_0.AuxInt) 6370 x := v_1 6371 v.reset(OpAMD64InvertFlags) 6372 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 6373 v0.AuxInt = int8ToAuxInt(int8(c)) 6374 v0.AddArg(x) 6375 v.AddArg(v0) 6376 return true 6377 } 6378 // match: (CMPB x y) 6379 // cond: canonLessThan(x,y) 6380 // result: (InvertFlags (CMPB y x)) 6381 for { 6382 x := v_0 6383 y := v_1 6384 if !(canonLessThan(x, y)) { 6385 break 6386 } 6387 v.reset(OpAMD64InvertFlags) 6388 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 6389 v0.AddArg2(y, x) 6390 v.AddArg(v0) 6391 return true 6392 } 6393 // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x) 6394 // cond: canMergeLoad(v, l) && clobber(l) 6395 // result: (CMPBload {sym} [off] ptr x mem) 6396 for { 6397 l := v_0 6398 if l.Op != OpAMD64MOVBload { 6399 break 6400 } 6401 off := auxIntToInt32(l.AuxInt) 6402 sym := auxToSym(l.Aux) 6403 mem := l.Args[1] 6404 ptr := l.Args[0] 6405 x := v_1 6406 if !(canMergeLoad(v, l) && clobber(l)) { 6407 break 6408 } 6409 v.reset(OpAMD64CMPBload) 6410 v.AuxInt = int32ToAuxInt(off) 6411 v.Aux = symToAux(sym) 6412 v.AddArg3(ptr, x, mem) 6413 return true 6414 } 6415 // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) 6416 // cond: canMergeLoad(v, l) && clobber(l) 6417 // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) 6418 for { 6419 x := v_0 6420 l := v_1 6421 if l.Op != OpAMD64MOVBload { 6422 break 6423 } 6424 off := auxIntToInt32(l.AuxInt) 6425 sym := auxToSym(l.Aux) 6426 mem := l.Args[1] 6427 ptr := l.Args[0] 6428 if !(canMergeLoad(v, l) && clobber(l)) { 6429 break 6430 } 6431 v.reset(OpAMD64InvertFlags) 6432 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags) 6433 v0.AuxInt = int32ToAuxInt(off) 6434 v0.Aux = symToAux(sym) 6435 v0.AddArg3(ptr, x, mem) 6436 v.AddArg(v0) 6437 return true 6438 } 6439 return false 6440 } 6441 func 
rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { 6442 v_0 := v.Args[0] 6443 b := v.Block 6444 // match: (CMPBconst (MOVLconst [x]) [y]) 6445 // cond: int8(x)==y 6446 // result: (FlagEQ) 6447 for { 6448 y := auxIntToInt8(v.AuxInt) 6449 if v_0.Op != OpAMD64MOVLconst { 6450 break 6451 } 6452 x := auxIntToInt32(v_0.AuxInt) 6453 if !(int8(x) == y) { 6454 break 6455 } 6456 v.reset(OpAMD64FlagEQ) 6457 return true 6458 } 6459 // match: (CMPBconst (MOVLconst [x]) [y]) 6460 // cond: int8(x)<y && uint8(x)<uint8(y) 6461 // result: (FlagLT_ULT) 6462 for { 6463 y := auxIntToInt8(v.AuxInt) 6464 if v_0.Op != OpAMD64MOVLconst { 6465 break 6466 } 6467 x := auxIntToInt32(v_0.AuxInt) 6468 if !(int8(x) < y && uint8(x) < uint8(y)) { 6469 break 6470 } 6471 v.reset(OpAMD64FlagLT_ULT) 6472 return true 6473 } 6474 // match: (CMPBconst (MOVLconst [x]) [y]) 6475 // cond: int8(x)<y && uint8(x)>uint8(y) 6476 // result: (FlagLT_UGT) 6477 for { 6478 y := auxIntToInt8(v.AuxInt) 6479 if v_0.Op != OpAMD64MOVLconst { 6480 break 6481 } 6482 x := auxIntToInt32(v_0.AuxInt) 6483 if !(int8(x) < y && uint8(x) > uint8(y)) { 6484 break 6485 } 6486 v.reset(OpAMD64FlagLT_UGT) 6487 return true 6488 } 6489 // match: (CMPBconst (MOVLconst [x]) [y]) 6490 // cond: int8(x)>y && uint8(x)<uint8(y) 6491 // result: (FlagGT_ULT) 6492 for { 6493 y := auxIntToInt8(v.AuxInt) 6494 if v_0.Op != OpAMD64MOVLconst { 6495 break 6496 } 6497 x := auxIntToInt32(v_0.AuxInt) 6498 if !(int8(x) > y && uint8(x) < uint8(y)) { 6499 break 6500 } 6501 v.reset(OpAMD64FlagGT_ULT) 6502 return true 6503 } 6504 // match: (CMPBconst (MOVLconst [x]) [y]) 6505 // cond: int8(x)>y && uint8(x)>uint8(y) 6506 // result: (FlagGT_UGT) 6507 for { 6508 y := auxIntToInt8(v.AuxInt) 6509 if v_0.Op != OpAMD64MOVLconst { 6510 break 6511 } 6512 x := auxIntToInt32(v_0.AuxInt) 6513 if !(int8(x) > y && uint8(x) > uint8(y)) { 6514 break 6515 } 6516 v.reset(OpAMD64FlagGT_UGT) 6517 return true 6518 } 6519 // match: (CMPBconst (ANDLconst _ [m]) [n]) 6520 // cond: 0 <= int8(m) && int8(m) < n 6521 // result: (FlagLT_ULT) 6522 for { 6523 n := auxIntToInt8(v.AuxInt) 6524 if v_0.Op != OpAMD64ANDLconst { 6525 break 6526 } 6527 m := auxIntToInt32(v_0.AuxInt) 6528 if !(0 <= int8(m) && int8(m) < n) { 6529 break 6530 } 6531 v.reset(OpAMD64FlagLT_ULT) 6532 return true 6533 } 6534 // match: (CMPBconst a:(ANDL x y) [0]) 6535 // cond: a.Uses == 1 6536 // result: (TESTB x y) 6537 for { 6538 if auxIntToInt8(v.AuxInt) != 0 { 6539 break 6540 } 6541 a := v_0 6542 if a.Op != OpAMD64ANDL { 6543 break 6544 } 6545 y := a.Args[1] 6546 x := a.Args[0] 6547 if !(a.Uses == 1) { 6548 break 6549 } 6550 v.reset(OpAMD64TESTB) 6551 v.AddArg2(x, y) 6552 return true 6553 } 6554 // match: (CMPBconst a:(ANDLconst [c] x) [0]) 6555 // cond: a.Uses == 1 6556 // result: (TESTBconst [int8(c)] x) 6557 for { 6558 if auxIntToInt8(v.AuxInt) != 0 { 6559 break 6560 } 6561 a := v_0 6562 if a.Op != OpAMD64ANDLconst { 6563 break 6564 } 6565 c := auxIntToInt32(a.AuxInt) 6566 x := a.Args[0] 6567 if !(a.Uses == 1) { 6568 break 6569 } 6570 v.reset(OpAMD64TESTBconst) 6571 v.AuxInt = int8ToAuxInt(int8(c)) 6572 v.AddArg(x) 6573 return true 6574 } 6575 // match: (CMPBconst x [0]) 6576 // result: (TESTB x x) 6577 for { 6578 if auxIntToInt8(v.AuxInt) != 0 { 6579 break 6580 } 6581 x := v_0 6582 v.reset(OpAMD64TESTB) 6583 v.AddArg2(x, x) 6584 return true 6585 } 6586 // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) 6587 // cond: l.Uses == 1 && clobber(l) 6588 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) 6589 
for { 6590 c := auxIntToInt8(v.AuxInt) 6591 l := v_0 6592 if l.Op != OpAMD64MOVBload { 6593 break 6594 } 6595 off := auxIntToInt32(l.AuxInt) 6596 sym := auxToSym(l.Aux) 6597 mem := l.Args[1] 6598 ptr := l.Args[0] 6599 if !(l.Uses == 1 && clobber(l)) { 6600 break 6601 } 6602 b = l.Block 6603 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) 6604 v.copyOf(v0) 6605 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 6606 v0.Aux = symToAux(sym) 6607 v0.AddArg2(ptr, mem) 6608 return true 6609 } 6610 return false 6611 } 6612 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool { 6613 v_1 := v.Args[1] 6614 v_0 := v.Args[0] 6615 // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 6616 // cond: ValAndOff(valoff1).canAdd32(off2) 6617 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) 6618 for { 6619 valoff1 := auxIntToValAndOff(v.AuxInt) 6620 sym := auxToSym(v.Aux) 6621 if v_0.Op != OpAMD64ADDQconst { 6622 break 6623 } 6624 off2 := auxIntToInt32(v_0.AuxInt) 6625 base := v_0.Args[0] 6626 mem := v_1 6627 if !(ValAndOff(valoff1).canAdd32(off2)) { 6628 break 6629 } 6630 v.reset(OpAMD64CMPBconstload) 6631 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 6632 v.Aux = symToAux(sym) 6633 v.AddArg2(base, mem) 6634 return true 6635 } 6636 // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 6637 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) 6638 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) 6639 for { 6640 valoff1 := auxIntToValAndOff(v.AuxInt) 6641 sym1 := auxToSym(v.Aux) 6642 if v_0.Op != OpAMD64LEAQ { 6643 break 6644 } 6645 off2 := auxIntToInt32(v_0.AuxInt) 6646 sym2 := auxToSym(v_0.Aux) 6647 base := v_0.Args[0] 6648 mem := v_1 6649 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { 6650 break 6651 } 6652 v.reset(OpAMD64CMPBconstload) 6653 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 6654 v.Aux = symToAux(mergeSym(sym1, sym2)) 6655 v.AddArg2(base, mem) 6656 return true 6657 } 6658 return false 6659 } 6660 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { 6661 v_2 := v.Args[2] 6662 v_1 := v.Args[1] 6663 v_0 := v.Args[0] 6664 // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem) 6665 // cond: is32Bit(int64(off1)+int64(off2)) 6666 // result: (CMPBload [off1+off2] {sym} base val mem) 6667 for { 6668 off1 := auxIntToInt32(v.AuxInt) 6669 sym := auxToSym(v.Aux) 6670 if v_0.Op != OpAMD64ADDQconst { 6671 break 6672 } 6673 off2 := auxIntToInt32(v_0.AuxInt) 6674 base := v_0.Args[0] 6675 val := v_1 6676 mem := v_2 6677 if !(is32Bit(int64(off1) + int64(off2))) { 6678 break 6679 } 6680 v.reset(OpAMD64CMPBload) 6681 v.AuxInt = int32ToAuxInt(off1 + off2) 6682 v.Aux = symToAux(sym) 6683 v.AddArg3(base, val, mem) 6684 return true 6685 } 6686 // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6687 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 6688 // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6689 for { 6690 off1 := auxIntToInt32(v.AuxInt) 6691 sym1 := auxToSym(v.Aux) 6692 if v_0.Op != OpAMD64LEAQ { 6693 break 6694 } 6695 off2 := auxIntToInt32(v_0.AuxInt) 6696 sym2 := auxToSym(v_0.Aux) 6697 base := v_0.Args[0] 6698 val := v_1 6699 mem := v_2 6700 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 6701 break 6702 } 6703 v.reset(OpAMD64CMPBload) 6704 v.AuxInt = int32ToAuxInt(off1 + off2) 6705 v.Aux = 
symToAux(mergeSym(sym1, sym2)) 6706 v.AddArg3(base, val, mem) 6707 return true 6708 } 6709 // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) 6710 // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) 6711 for { 6712 off := auxIntToInt32(v.AuxInt) 6713 sym := auxToSym(v.Aux) 6714 ptr := v_0 6715 if v_1.Op != OpAMD64MOVLconst { 6716 break 6717 } 6718 c := auxIntToInt32(v_1.AuxInt) 6719 mem := v_2 6720 v.reset(OpAMD64CMPBconstload) 6721 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) 6722 v.Aux = symToAux(sym) 6723 v.AddArg2(ptr, mem) 6724 return true 6725 } 6726 return false 6727 } 6728 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { 6729 v_1 := v.Args[1] 6730 v_0 := v.Args[0] 6731 b := v.Block 6732 // match: (CMPL x (MOVLconst [c])) 6733 // result: (CMPLconst x [c]) 6734 for { 6735 x := v_0 6736 if v_1.Op != OpAMD64MOVLconst { 6737 break 6738 } 6739 c := auxIntToInt32(v_1.AuxInt) 6740 v.reset(OpAMD64CMPLconst) 6741 v.AuxInt = int32ToAuxInt(c) 6742 v.AddArg(x) 6743 return true 6744 } 6745 // match: (CMPL (MOVLconst [c]) x) 6746 // result: (InvertFlags (CMPLconst x [c])) 6747 for { 6748 if v_0.Op != OpAMD64MOVLconst { 6749 break 6750 } 6751 c := auxIntToInt32(v_0.AuxInt) 6752 x := v_1 6753 v.reset(OpAMD64InvertFlags) 6754 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 6755 v0.AuxInt = int32ToAuxInt(c) 6756 v0.AddArg(x) 6757 v.AddArg(v0) 6758 return true 6759 } 6760 // match: (CMPL x y) 6761 // cond: canonLessThan(x,y) 6762 // result: (InvertFlags (CMPL y x)) 6763 for { 6764 x := v_0 6765 y := v_1 6766 if !(canonLessThan(x, y)) { 6767 break 6768 } 6769 v.reset(OpAMD64InvertFlags) 6770 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 6771 v0.AddArg2(y, x) 6772 v.AddArg(v0) 6773 return true 6774 } 6775 // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x) 6776 // cond: canMergeLoad(v, l) && clobber(l) 6777 // result: (CMPLload {sym} [off] ptr x mem) 6778 for { 6779 l := v_0 6780 if l.Op != OpAMD64MOVLload { 6781 break 6782 } 6783 off := auxIntToInt32(l.AuxInt) 6784 sym := auxToSym(l.Aux) 6785 mem := l.Args[1] 6786 ptr := l.Args[0] 6787 x := v_1 6788 if !(canMergeLoad(v, l) && clobber(l)) { 6789 break 6790 } 6791 v.reset(OpAMD64CMPLload) 6792 v.AuxInt = int32ToAuxInt(off) 6793 v.Aux = symToAux(sym) 6794 v.AddArg3(ptr, x, mem) 6795 return true 6796 } 6797 // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) 6798 // cond: canMergeLoad(v, l) && clobber(l) 6799 // result: (InvertFlags (CMPLload {sym} [off] ptr x mem)) 6800 for { 6801 x := v_0 6802 l := v_1 6803 if l.Op != OpAMD64MOVLload { 6804 break 6805 } 6806 off := auxIntToInt32(l.AuxInt) 6807 sym := auxToSym(l.Aux) 6808 mem := l.Args[1] 6809 ptr := l.Args[0] 6810 if !(canMergeLoad(v, l) && clobber(l)) { 6811 break 6812 } 6813 v.reset(OpAMD64InvertFlags) 6814 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags) 6815 v0.AuxInt = int32ToAuxInt(off) 6816 v0.Aux = symToAux(sym) 6817 v0.AddArg3(ptr, x, mem) 6818 v.AddArg(v0) 6819 return true 6820 } 6821 return false 6822 } 6823 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { 6824 v_0 := v.Args[0] 6825 b := v.Block 6826 // match: (CMPLconst (MOVLconst [x]) [y]) 6827 // cond: x==y 6828 // result: (FlagEQ) 6829 for { 6830 y := auxIntToInt32(v.AuxInt) 6831 if v_0.Op != OpAMD64MOVLconst { 6832 break 6833 } 6834 x := auxIntToInt32(v_0.AuxInt) 6835 if !(x == y) { 6836 break 6837 } 6838 v.reset(OpAMD64FlagEQ) 6839 return true 6840 } 6841 // match: (CMPLconst (MOVLconst [x]) [y]) 6842 // cond: x<y && uint32(x)<uint32(y) 6843 // 
result: (FlagLT_ULT) 6844 for { 6845 y := auxIntToInt32(v.AuxInt) 6846 if v_0.Op != OpAMD64MOVLconst { 6847 break 6848 } 6849 x := auxIntToInt32(v_0.AuxInt) 6850 if !(x < y && uint32(x) < uint32(y)) { 6851 break 6852 } 6853 v.reset(OpAMD64FlagLT_ULT) 6854 return true 6855 } 6856 // match: (CMPLconst (MOVLconst [x]) [y]) 6857 // cond: x<y && uint32(x)>uint32(y) 6858 // result: (FlagLT_UGT) 6859 for { 6860 y := auxIntToInt32(v.AuxInt) 6861 if v_0.Op != OpAMD64MOVLconst { 6862 break 6863 } 6864 x := auxIntToInt32(v_0.AuxInt) 6865 if !(x < y && uint32(x) > uint32(y)) { 6866 break 6867 } 6868 v.reset(OpAMD64FlagLT_UGT) 6869 return true 6870 } 6871 // match: (CMPLconst (MOVLconst [x]) [y]) 6872 // cond: x>y && uint32(x)<uint32(y) 6873 // result: (FlagGT_ULT) 6874 for { 6875 y := auxIntToInt32(v.AuxInt) 6876 if v_0.Op != OpAMD64MOVLconst { 6877 break 6878 } 6879 x := auxIntToInt32(v_0.AuxInt) 6880 if !(x > y && uint32(x) < uint32(y)) { 6881 break 6882 } 6883 v.reset(OpAMD64FlagGT_ULT) 6884 return true 6885 } 6886 // match: (CMPLconst (MOVLconst [x]) [y]) 6887 // cond: x>y && uint32(x)>uint32(y) 6888 // result: (FlagGT_UGT) 6889 for { 6890 y := auxIntToInt32(v.AuxInt) 6891 if v_0.Op != OpAMD64MOVLconst { 6892 break 6893 } 6894 x := auxIntToInt32(v_0.AuxInt) 6895 if !(x > y && uint32(x) > uint32(y)) { 6896 break 6897 } 6898 v.reset(OpAMD64FlagGT_UGT) 6899 return true 6900 } 6901 // match: (CMPLconst (SHRLconst _ [c]) [n]) 6902 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 6903 // result: (FlagLT_ULT) 6904 for { 6905 n := auxIntToInt32(v.AuxInt) 6906 if v_0.Op != OpAMD64SHRLconst { 6907 break 6908 } 6909 c := auxIntToInt8(v_0.AuxInt) 6910 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 6911 break 6912 } 6913 v.reset(OpAMD64FlagLT_ULT) 6914 return true 6915 } 6916 // match: (CMPLconst (ANDLconst _ [m]) [n]) 6917 // cond: 0 <= m && m < n 6918 // result: (FlagLT_ULT) 6919 for { 6920 n := auxIntToInt32(v.AuxInt) 6921 if v_0.Op != OpAMD64ANDLconst { 6922 break 6923 } 6924 m := auxIntToInt32(v_0.AuxInt) 6925 if !(0 <= m && m < n) { 6926 break 6927 } 6928 v.reset(OpAMD64FlagLT_ULT) 6929 return true 6930 } 6931 // match: (CMPLconst a:(ANDL x y) [0]) 6932 // cond: a.Uses == 1 6933 // result: (TESTL x y) 6934 for { 6935 if auxIntToInt32(v.AuxInt) != 0 { 6936 break 6937 } 6938 a := v_0 6939 if a.Op != OpAMD64ANDL { 6940 break 6941 } 6942 y := a.Args[1] 6943 x := a.Args[0] 6944 if !(a.Uses == 1) { 6945 break 6946 } 6947 v.reset(OpAMD64TESTL) 6948 v.AddArg2(x, y) 6949 return true 6950 } 6951 // match: (CMPLconst a:(ANDLconst [c] x) [0]) 6952 // cond: a.Uses == 1 6953 // result: (TESTLconst [c] x) 6954 for { 6955 if auxIntToInt32(v.AuxInt) != 0 { 6956 break 6957 } 6958 a := v_0 6959 if a.Op != OpAMD64ANDLconst { 6960 break 6961 } 6962 c := auxIntToInt32(a.AuxInt) 6963 x := a.Args[0] 6964 if !(a.Uses == 1) { 6965 break 6966 } 6967 v.reset(OpAMD64TESTLconst) 6968 v.AuxInt = int32ToAuxInt(c) 6969 v.AddArg(x) 6970 return true 6971 } 6972 // match: (CMPLconst x [0]) 6973 // result: (TESTL x x) 6974 for { 6975 if auxIntToInt32(v.AuxInt) != 0 { 6976 break 6977 } 6978 x := v_0 6979 v.reset(OpAMD64TESTL) 6980 v.AddArg2(x, x) 6981 return true 6982 } 6983 // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) 6984 // cond: l.Uses == 1 && clobber(l) 6985 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) 6986 for { 6987 c := auxIntToInt32(v.AuxInt) 6988 l := v_0 6989 if l.Op != OpAMD64MOVLload { 6990 break 6991 } 6992 off := auxIntToInt32(l.AuxInt) 6993 
sym := auxToSym(l.Aux) 6994 mem := l.Args[1] 6995 ptr := l.Args[0] 6996 if !(l.Uses == 1 && clobber(l)) { 6997 break 6998 } 6999 b = l.Block 7000 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) 7001 v.copyOf(v0) 7002 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) 7003 v0.Aux = symToAux(sym) 7004 v0.AddArg2(ptr, mem) 7005 return true 7006 } 7007 return false 7008 } 7009 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool { 7010 v_1 := v.Args[1] 7011 v_0 := v.Args[0] 7012 // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 7013 // cond: ValAndOff(valoff1).canAdd32(off2) 7014 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) 7015 for { 7016 valoff1 := auxIntToValAndOff(v.AuxInt) 7017 sym := auxToSym(v.Aux) 7018 if v_0.Op != OpAMD64ADDQconst { 7019 break 7020 } 7021 off2 := auxIntToInt32(v_0.AuxInt) 7022 base := v_0.Args[0] 7023 mem := v_1 7024 if !(ValAndOff(valoff1).canAdd32(off2)) { 7025 break 7026 } 7027 v.reset(OpAMD64CMPLconstload) 7028 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 7029 v.Aux = symToAux(sym) 7030 v.AddArg2(base, mem) 7031 return true 7032 } 7033 // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 7034 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) 7035 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) 7036 for { 7037 valoff1 := auxIntToValAndOff(v.AuxInt) 7038 sym1 := auxToSym(v.Aux) 7039 if v_0.Op != OpAMD64LEAQ { 7040 break 7041 } 7042 off2 := auxIntToInt32(v_0.AuxInt) 7043 sym2 := auxToSym(v_0.Aux) 7044 base := v_0.Args[0] 7045 mem := v_1 7046 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { 7047 break 7048 } 7049 v.reset(OpAMD64CMPLconstload) 7050 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 7051 v.Aux = symToAux(mergeSym(sym1, sym2)) 7052 v.AddArg2(base, mem) 7053 return true 7054 } 7055 return false 7056 } 7057 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { 7058 v_2 := v.Args[2] 7059 v_1 := v.Args[1] 7060 v_0 := v.Args[0] 7061 // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem) 7062 // cond: is32Bit(int64(off1)+int64(off2)) 7063 // result: (CMPLload [off1+off2] {sym} base val mem) 7064 for { 7065 off1 := auxIntToInt32(v.AuxInt) 7066 sym := auxToSym(v.Aux) 7067 if v_0.Op != OpAMD64ADDQconst { 7068 break 7069 } 7070 off2 := auxIntToInt32(v_0.AuxInt) 7071 base := v_0.Args[0] 7072 val := v_1 7073 mem := v_2 7074 if !(is32Bit(int64(off1) + int64(off2))) { 7075 break 7076 } 7077 v.reset(OpAMD64CMPLload) 7078 v.AuxInt = int32ToAuxInt(off1 + off2) 7079 v.Aux = symToAux(sym) 7080 v.AddArg3(base, val, mem) 7081 return true 7082 } 7083 // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7084 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 7085 // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7086 for { 7087 off1 := auxIntToInt32(v.AuxInt) 7088 sym1 := auxToSym(v.Aux) 7089 if v_0.Op != OpAMD64LEAQ { 7090 break 7091 } 7092 off2 := auxIntToInt32(v_0.AuxInt) 7093 sym2 := auxToSym(v_0.Aux) 7094 base := v_0.Args[0] 7095 val := v_1 7096 mem := v_2 7097 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 7098 break 7099 } 7100 v.reset(OpAMD64CMPLload) 7101 v.AuxInt = int32ToAuxInt(off1 + off2) 7102 v.Aux = symToAux(mergeSym(sym1, sym2)) 7103 v.AddArg3(base, val, mem) 7104 return true 7105 } 7106 // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) 7107 // 
result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) 7108 for { 7109 off := auxIntToInt32(v.AuxInt) 7110 sym := auxToSym(v.Aux) 7111 ptr := v_0 7112 if v_1.Op != OpAMD64MOVLconst { 7113 break 7114 } 7115 c := auxIntToInt32(v_1.AuxInt) 7116 mem := v_2 7117 v.reset(OpAMD64CMPLconstload) 7118 v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) 7119 v.Aux = symToAux(sym) 7120 v.AddArg2(ptr, mem) 7121 return true 7122 } 7123 return false 7124 } 7125 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { 7126 v_1 := v.Args[1] 7127 v_0 := v.Args[0] 7128 b := v.Block 7129 // match: (CMPQ x (MOVQconst [c])) 7130 // cond: is32Bit(c) 7131 // result: (CMPQconst x [int32(c)]) 7132 for { 7133 x := v_0 7134 if v_1.Op != OpAMD64MOVQconst { 7135 break 7136 } 7137 c := auxIntToInt64(v_1.AuxInt) 7138 if !(is32Bit(c)) { 7139 break 7140 } 7141 v.reset(OpAMD64CMPQconst) 7142 v.AuxInt = int32ToAuxInt(int32(c)) 7143 v.AddArg(x) 7144 return true 7145 } 7146 // match: (CMPQ (MOVQconst [c]) x) 7147 // cond: is32Bit(c) 7148 // result: (InvertFlags (CMPQconst x [int32(c)])) 7149 for { 7150 if v_0.Op != OpAMD64MOVQconst { 7151 break 7152 } 7153 c := auxIntToInt64(v_0.AuxInt) 7154 x := v_1 7155 if !(is32Bit(c)) { 7156 break 7157 } 7158 v.reset(OpAMD64InvertFlags) 7159 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 7160 v0.AuxInt = int32ToAuxInt(int32(c)) 7161 v0.AddArg(x) 7162 v.AddArg(v0) 7163 return true 7164 } 7165 // match: (CMPQ x y) 7166 // cond: canonLessThan(x,y) 7167 // result: (InvertFlags (CMPQ y x)) 7168 for { 7169 x := v_0 7170 y := v_1 7171 if !(canonLessThan(x, y)) { 7172 break 7173 } 7174 v.reset(OpAMD64InvertFlags) 7175 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 7176 v0.AddArg2(y, x) 7177 v.AddArg(v0) 7178 return true 7179 } 7180 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7181 // cond: x==y 7182 // result: (FlagEQ) 7183 for { 7184 if v_0.Op != OpAMD64MOVQconst { 7185 break 7186 } 7187 x := auxIntToInt64(v_0.AuxInt) 7188 if v_1.Op != OpAMD64MOVQconst { 7189 break 7190 } 7191 y := auxIntToInt64(v_1.AuxInt) 7192 if !(x == y) { 7193 break 7194 } 7195 v.reset(OpAMD64FlagEQ) 7196 return true 7197 } 7198 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7199 // cond: x<y && uint64(x)<uint64(y) 7200 // result: (FlagLT_ULT) 7201 for { 7202 if v_0.Op != OpAMD64MOVQconst { 7203 break 7204 } 7205 x := auxIntToInt64(v_0.AuxInt) 7206 if v_1.Op != OpAMD64MOVQconst { 7207 break 7208 } 7209 y := auxIntToInt64(v_1.AuxInt) 7210 if !(x < y && uint64(x) < uint64(y)) { 7211 break 7212 } 7213 v.reset(OpAMD64FlagLT_ULT) 7214 return true 7215 } 7216 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7217 // cond: x<y && uint64(x)>uint64(y) 7218 // result: (FlagLT_UGT) 7219 for { 7220 if v_0.Op != OpAMD64MOVQconst { 7221 break 7222 } 7223 x := auxIntToInt64(v_0.AuxInt) 7224 if v_1.Op != OpAMD64MOVQconst { 7225 break 7226 } 7227 y := auxIntToInt64(v_1.AuxInt) 7228 if !(x < y && uint64(x) > uint64(y)) { 7229 break 7230 } 7231 v.reset(OpAMD64FlagLT_UGT) 7232 return true 7233 } 7234 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7235 // cond: x>y && uint64(x)<uint64(y) 7236 // result: (FlagGT_ULT) 7237 for { 7238 if v_0.Op != OpAMD64MOVQconst { 7239 break 7240 } 7241 x := auxIntToInt64(v_0.AuxInt) 7242 if v_1.Op != OpAMD64MOVQconst { 7243 break 7244 } 7245 y := auxIntToInt64(v_1.AuxInt) 7246 if !(x > y && uint64(x) < uint64(y)) { 7247 break 7248 } 7249 v.reset(OpAMD64FlagGT_ULT) 7250 return true 7251 } 7252 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7253 // cond: x>y && uint64(x)>uint64(y) 
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==int64(y)
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x == int64(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<int64(y) && uint64(x)<uint64(int64(y))
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<int64(y) && uint64(x)>uint64(int64(y))
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>int64(y) && uint64(x)<uint64(int64(y))
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>int64(y) && uint64(x)>uint64(int64(y))
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst a:(ANDQ x y) [0])
	// cond: a.Uses == 1
	// result: (TESTQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQ {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPQconst a:(ANDQconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTQconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
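// Illustrative sketch (hand-written; not emitted by the _gen generator): the
// CMPQconst rules above evaluate comparisons whose operand is statically
// known and reduce compares-with-zero to tests, in the same match/result
// notation the rule comments use:
//
//	(CMPQconst (MOVQconst [7]) [7]) => (FlagEQ)    // fold straight to a flag constant
//	(CMPQconst x [0])               => (TESTQ x x) // same flags, shorter encoding
//
// Later rules and branch lowering then consume FlagEQ and friends directly,
// deleting the compare instruction altogether.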
func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPQload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPW x (MOVLconst [c]))
	// result: (CMPWconst x [int16(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// result: (InvertFlags (CMPWconst x [int16(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int16ToAuxInt(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPW y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPWload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int16(m) && int16(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTW x y)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPWconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTWconst [int16(c)] x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt16(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
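// Illustrative sketch (hand-written; not emitted by the _gen generator): the
// CMP*constload ops carry the compare constant and the load offset together
// in a single AuxInt as a ValAndOff, which is why the rules above adjust
// offsets via ValAndOff.addOffset32 rather than plain integer addition.
// Assuming the accessors defined by this package's rewrite helpers:
//
//	vo := makeValAndOff(42, 8) // compare against 42 at addressing offset 8
//	_ = vo.Val()               // 42, the packed compare constant
//	_ = vo.Off()               // 8, the packed addressing offset
//
// canAdd32 guards the repack so the offset half cannot overflow.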
func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULL x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULL y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULLU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULLU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULLU)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQ x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQ y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQU)
		v.AddArg2(y, x)
		return true
	}
	return false
}
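// Illustrative sketch (hand-written; not emitted by the _gen generator): the
// HMUL* rules above only commute operands. The high-multiply instructions
// consume their first operand from a fixed register (AX on amd64), so
// placing a rematerializeable value there — one the register allocator can
// cheaply regenerate in place, such as a constant — reduces register
// pressure:
//
//	(HMULQ x (MOVQconst [c])) => (HMULQ (MOVQconst [c]) x)
//	// when !x.rematerializeable(): the constant, not x, is forced into AX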
func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAL [c] {s} (ADDL x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	return false
}
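// Illustrative sketch (hand-written; not emitted by the _gen generator):
// chaining the LEAL rules here folds 32-bit address arithmetic into a single
// scaled LEA. Starting from x + 4*y + 8, with the multiply expressed as a
// left shift:
//
//	(LEAL [8] {s} (ADDL x (SHLLconst [2] y)))
//	=> (LEAL1 [8] {s} x (SHLLconst [2] y))   // LEAL rule above; x, y not SB
//	=> (LEAL4 [8] {s} x y)                   // LEAL1 rule below absorbs the shift
//
// which assembles to one LEAL 8(x)(y*4) instruction.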
func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	return false
}
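// Illustrative sketch (hand-written; not emitted by the _gen generator): the
// LEAQ rules above collapse stacked address computations whenever the
// combined displacement still fits in 32 bits and the symbols can merge:
//
//	(LEAQ [16] {sym1} (LEAQ8 [8] {sym2} x y))
//	=> (LEAQ8 [24] {mergeSym(sym1,sym2)} x y)
//	// requires is32Bit(16+8) && canMergeSym(sym1, sym2)
//
// A single LEA instruction then performs the whole x + 8*y + 24 computation.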
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64LEAQ {
				continue
			}
			off2 := auxIntToInt32(v_0.AuxInt)
			sym2 := auxToSym(v_0.Aux)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			y := v_1.Args[1]
			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
					continue
				}
				v.reset(OpAMD64LEAQ2)
				v.AuxInt = int32ToAuxInt(off1 + off2)
				v.Aux = symToAux(mergeSym(sym1, sym2))
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (LEAQ1 [0] x y)
	// cond: v.Aux == nil
	// result: (ADDQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		y := v_1
		if !(v.Aux == nil) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
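// Illustrative sketch (hand-written; not emitted by the _gen generator): the
// last LEAQ1 rule above strips a degenerate LEA. With a zero displacement
// and no symbol, (LEAQ1 [0] x y) computes exactly x+y, so
//
//	(LEAQ1 [0] x y) => (ADDQ x y)   // only when v.Aux == nil
//
// trading the LEA form for the shorter two-operand ADDQ encoding.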
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + 2*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
	// result: (LEAQ8 [off1+4*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + 4*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1
	// result: (MOVLstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem)
	// cond: x.Uses == 1
	// result: (MOVQstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPQ {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
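// Illustrative sketch (hand-written; not emitted by the _gen generator):
// MOVBE stores byte-swap as part of the store, so an explicit single-use
// swap feeding one is redundant — the two swaps cancel:
//
//	(MOVBEQstore [i] {s} p (BSWAPQ w) mem) => (MOVQstore [i] {s} p w mem)
//	// when the BSWAPQ value has Uses == 1
//
// and likewise for the L and W widths handled here.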
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7f)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
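// Illustration (informal note): a sign extension is dropped when the sign bit
// is provably clear. By the (MOVBQSX (ANDLconst [c] x)) rule with c&0x80 == 0,
// code shaped like
//
//	r := int64(int8(x) & 0x7f)
//
// needs only the AND: masking with 0x7f clears bit 7, so the byte is already
// non-negative and the MOVBQSX collapses into ANDLconst [c & 0x7f].
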
func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
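// Illustration (informal note): the first MOVBQSXload rule is store-to-load
// forwarding: a sign-extending byte load that reads back what a MOVBstore just
// wrote to the same address (same sym, offset, and pointer) skips memory and
// sign-extends the stored value directly as (MOVBQSX x).
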
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x)
	// cond: zeroUpper56Bits(x,3)
	// result: x
	for {
		x := v_0
		if !(zeroUpper56Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xff)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
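// Illustration (informal note): zero extension is often free. By the
// (MOVBQZX x) rule with zeroUpper56Bits(x, 3), a value whose upper 56 bits are
// known to be zero, such as the result of a byte-sized load, is used as-is
// and no MOVZX instruction is emitted.
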
func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read8(sym, int64(off)))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
		return true
	}
	return false
}
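// Illustration (informal note): the final MOVBload rule folds loads from
// read-only data at compile time: a byte load at a constant offset from a
// read-only symbol (symIsRO) becomes MOVLconst of read8(sym, off), so a byte
// read at a known index from a string constant in rodata costs no load at
// run time.
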
func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstore [i-1] {s} p (ROLWconst <typ.UInt16> [8] w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x0 := v_2
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v0.AuxInt = int8ToAuxInt(8)
		v0.AddArg(w)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
	// result: (MOVWstore [i] {s} p0 (ROLWconst <typ.UInt16> [8] w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		w := v_1
		x0 := v_2
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		p0 := x0.Args[0]
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v0.AuxInt = int8ToAuxInt(8)
		v0.AddArg(w)
		v.AddArg3(p0, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
	// result: (MOVLstore [i-3] {s} p (BSWAPL <typ.UInt32> w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x2 := v_2
		if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 3)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32)
		v0.AddArg(w)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
	// result: (MOVLstore [i] {s} p0 (BSWAPL <typ.UInt32> w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p3 := v_0
		w := v_1
		x2 := v_2
		if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		p2 := x2.Args[0]
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		p1 := x1.Args[0]
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		p0 := x0.Args[0]
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32)
		v0.AddArg(w)
		v.AddArg3(p0, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
	// result: (MOVQstore [i-7] {s} p (BSWAPQ <typ.UInt64> w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x6 := v_2
		if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
			break
		}
		_ = x6.Args[2]
		if p != x6.Args[0] {
			break
		}
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
			break
		}
		_ = x5.Args[2]
		if p != x5.Args[0] {
			break
		}
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
			break
		}
		_ = x4.Args[2]
		if p != x4.Args[0] {
			break
		}
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
			break
		}
		_ = x3.Args[2]
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 7)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, typ.UInt64)
		v0.AddArg(w)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
	// result: (MOVQstore [i] {s} p0 (BSWAPQ <typ.UInt64> w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p7 := v_0
		w := v_1
		x6 := v_2
		if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
			break
		}
		_ = x6.Args[2]
		p6 := x6.Args[0]
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
			break
		}
		_ = x5.Args[2]
		p5 := x5.Args[0]
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
			break
		}
		_ = x4.Args[2]
		p4 := x4.Args[0]
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
			break
		}
		_ = x3.Args[2]
		p3 := x3.Args[0]
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		p2 := x2.Args[0]
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		p1 := x1.Args[0]
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		p0 := x0.Args[0]
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, typ.UInt64)
		v0.AddArg(w)
		v.AddArg3(p0, v0, mem)
		return true
	}
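	// Note (informal): the two eight-way patterns above recognize a 64-bit
	// value written highest-byte-first as eight single-byte stores, once
	// addressed by decreasing offsets and once by sequential pointers, and
	// replace the chain with a single BSWAPQ plus MOVQstore; the four-way
	// patterns before them do the same for 32 bits with BSWAPL.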
	// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVBstore [c3] {s} p3 (SHRQconst [56] w) x1:(MOVWstore [c2] {s} p2 (SHRQconst [40] w) x2:(MOVLstore [c1] {s} p1 (SHRQconst [8] w) x3:(MOVBstore [c0] {s} p0 w mem))))
	// cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && sequentialAddresses(p0, p1, int64(1 + c0 - c1)) && sequentialAddresses(p0, p2, int64(5 + c0 - c2)) && sequentialAddresses(p0, p3, int64(7 + c0 - c3)) && clobber(x1, x2, x3)
	// result: (MOVQstore [c0] {s} p0 w mem)
	for {
		c3 := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p3 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 {
			break
		}
		w := v_1.Args[0]
		x1 := v_2
		if x1.Op != OpAMD64MOVWstore {
			break
		}
		c2 := auxIntToInt32(x1.AuxInt)
		if auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		p2 := x1.Args[0]
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
			break
		}
		x2 := x1.Args[2]
		if x2.Op != OpAMD64MOVLstore {
			break
		}
		c1 := auxIntToInt32(x2.AuxInt)
		if auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		p1 := x2.Args[0]
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
			break
		}
		x3 := x2.Args[2]
		if x3.Op != OpAMD64MOVBstore {
			break
		}
		c0 := auxIntToInt32(x3.AuxInt)
		if auxToSym(x3.Aux) != s {
			break
		}
		mem := x3.Args[2]
		p0 := x3.Args[0]
		if w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && sequentialAddresses(p0, p1, int64(1+c0-c1)) && sequentialAddresses(p0, p2, int64(5+c0-c2)) && sequentialAddresses(p0, p3, int64(7+c0-c3)) && clobber(x1, x2, x3)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(c0)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVBload {
			break
		}
		j := auxIntToInt32(x1.AuxInt)
		s2 := auxToSym(x1.Aux)
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(j - 1)
		v0.Aux = symToAux(s2)
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
	return false
}
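// Illustration (informal note): the last MOVBstore rule widens a byte-by-byte
// copy. Two adjacent MOVBload/MOVBstore pairs over the same memory state
// become one MOVWload feeding one MOVWstore, i.e. a 2-byte copy, when the
// intermediate loads and the inner store have no other uses.
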
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+1-c.Off())) && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p0 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+1-c.Off())) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	// match: (MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+1-c.Off())) && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p1 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+1-c.Off())) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	return false
}
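// Illustration (informal note): adjacent constant byte stores merge as well.
// The pair rules above combine the two immediates little-endian,
// makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off()), so for example
//
//	p[0] = 0x34
//	p[1] = 0x12
//
// becomes a single MOVWstoreconst of 0x1234 at p.
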
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v_0
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
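// Illustration (informal note): the (MOVLQZX x) rule with zeroUpper32Bits(x, 3)
// relies on the amd64 convention that 32-bit instructions zero the upper half
// of the destination register, so for example
//
//	r := uint64(uint32(a) + uint32(b)) // ADDL already leaves bits 32-63 zero
//
// needs no explicit zero-extension instruction.
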
OpAMD64MOVLQZX { 11463 break 11464 } 11465 x := v_0.Args[0] 11466 v.reset(OpAMD64MOVLQZX) 11467 v.AddArg(x) 11468 return true 11469 } 11470 // match: (MOVLQZX (MOVWQZX x)) 11471 // result: (MOVWQZX x) 11472 for { 11473 if v_0.Op != OpAMD64MOVWQZX { 11474 break 11475 } 11476 x := v_0.Args[0] 11477 v.reset(OpAMD64MOVWQZX) 11478 v.AddArg(x) 11479 return true 11480 } 11481 // match: (MOVLQZX (MOVBQZX x)) 11482 // result: (MOVBQZX x) 11483 for { 11484 if v_0.Op != OpAMD64MOVBQZX { 11485 break 11486 } 11487 x := v_0.Args[0] 11488 v.reset(OpAMD64MOVBQZX) 11489 v.AddArg(x) 11490 return true 11491 } 11492 return false 11493 } 11494 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { 11495 v_1 := v.Args[1] 11496 v_0 := v.Args[0] 11497 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 11498 // cond: is32Bit(int64(off1)+int64(off2)) 11499 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 11500 for { 11501 off1 := auxIntToInt32(v.AuxInt) 11502 sym := auxToSym(v.Aux) 11503 if v_0.Op != OpAMD64ADDQconst { 11504 break 11505 } 11506 off2 := auxIntToInt32(v_0.AuxInt) 11507 ptr := v_0.Args[0] 11508 mem := v_1 11509 if !(is32Bit(int64(off1) + int64(off2))) { 11510 break 11511 } 11512 v.reset(OpAMD64MOVLatomicload) 11513 v.AuxInt = int32ToAuxInt(off1 + off2) 11514 v.Aux = symToAux(sym) 11515 v.AddArg2(ptr, mem) 11516 return true 11517 } 11518 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 11519 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 11520 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) 11521 for { 11522 off1 := auxIntToInt32(v.AuxInt) 11523 sym1 := auxToSym(v.Aux) 11524 if v_0.Op != OpAMD64LEAQ { 11525 break 11526 } 11527 off2 := auxIntToInt32(v_0.AuxInt) 11528 sym2 := auxToSym(v_0.Aux) 11529 ptr := v_0.Args[0] 11530 mem := v_1 11531 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 11532 break 11533 } 11534 v.reset(OpAMD64MOVLatomicload) 11535 v.AuxInt = int32ToAuxInt(off1 + off2) 11536 v.Aux = symToAux(mergeSym(sym1, sym2)) 11537 v.AddArg2(ptr, mem) 11538 return true 11539 } 11540 return false 11541 } 11542 func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool { 11543 v_0 := v.Args[0] 11544 b := v.Block 11545 // match: (MOVLf2i <t> (Arg <u> [off] {sym})) 11546 // cond: t.Size() == u.Size() 11547 // result: @b.Func.Entry (Arg <t> [off] {sym}) 11548 for { 11549 t := v.Type 11550 if v_0.Op != OpArg { 11551 break 11552 } 11553 u := v_0.Type 11554 off := auxIntToInt32(v_0.AuxInt) 11555 sym := auxToSym(v_0.Aux) 11556 if !(t.Size() == u.Size()) { 11557 break 11558 } 11559 b = b.Func.Entry 11560 v0 := b.NewValue0(v.Pos, OpArg, t) 11561 v.copyOf(v0) 11562 v0.AuxInt = int32ToAuxInt(off) 11563 v0.Aux = symToAux(sym) 11564 return true 11565 } 11566 return false 11567 } 11568 func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool { 11569 v_0 := v.Args[0] 11570 b := v.Block 11571 // match: (MOVLi2f <t> (Arg <u> [off] {sym})) 11572 // cond: t.Size() == u.Size() 11573 // result: @b.Func.Entry (Arg <t> [off] {sym}) 11574 for { 11575 t := v.Type 11576 if v_0.Op != OpArg { 11577 break 11578 } 11579 u := v_0.Type 11580 off := auxIntToInt32(v_0.AuxInt) 11581 sym := auxToSym(v_0.Aux) 11582 if !(t.Size() == u.Size()) { 11583 break 11584 } 11585 b = b.Func.Entry 11586 v0 := b.NewValue0(v.Pos, OpArg, t) 11587 v.copyOf(v0) 11588 v0.AuxInt = int32ToAuxInt(off) 11589 v0.Aux = symToAux(sym) 11590 return true 11591 } 11592 return false 11593 } 11594 func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { 11595 v_1 := 
v.Args[1] 11596 v_0 := v.Args[0] 11597 b := v.Block 11598 config := b.Func.Config 11599 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 11600 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 11601 // result: (MOVLQZX x) 11602 for { 11603 off := auxIntToInt32(v.AuxInt) 11604 sym := auxToSym(v.Aux) 11605 ptr := v_0 11606 if v_1.Op != OpAMD64MOVLstore { 11607 break 11608 } 11609 off2 := auxIntToInt32(v_1.AuxInt) 11610 sym2 := auxToSym(v_1.Aux) 11611 x := v_1.Args[1] 11612 ptr2 := v_1.Args[0] 11613 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 11614 break 11615 } 11616 v.reset(OpAMD64MOVLQZX) 11617 v.AddArg(x) 11618 return true 11619 } 11620 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 11621 // cond: is32Bit(int64(off1)+int64(off2)) 11622 // result: (MOVLload [off1+off2] {sym} ptr mem) 11623 for { 11624 off1 := auxIntToInt32(v.AuxInt) 11625 sym := auxToSym(v.Aux) 11626 if v_0.Op != OpAMD64ADDQconst { 11627 break 11628 } 11629 off2 := auxIntToInt32(v_0.AuxInt) 11630 ptr := v_0.Args[0] 11631 mem := v_1 11632 if !(is32Bit(int64(off1) + int64(off2))) { 11633 break 11634 } 11635 v.reset(OpAMD64MOVLload) 11636 v.AuxInt = int32ToAuxInt(off1 + off2) 11637 v.Aux = symToAux(sym) 11638 v.AddArg2(ptr, mem) 11639 return true 11640 } 11641 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 11642 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 11643 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 11644 for { 11645 off1 := auxIntToInt32(v.AuxInt) 11646 sym1 := auxToSym(v.Aux) 11647 if v_0.Op != OpAMD64LEAQ { 11648 break 11649 } 11650 off2 := auxIntToInt32(v_0.AuxInt) 11651 sym2 := auxToSym(v_0.Aux) 11652 base := v_0.Args[0] 11653 mem := v_1 11654 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 11655 break 11656 } 11657 v.reset(OpAMD64MOVLload) 11658 v.AuxInt = int32ToAuxInt(off1 + off2) 11659 v.Aux = symToAux(mergeSym(sym1, sym2)) 11660 v.AddArg2(base, mem) 11661 return true 11662 } 11663 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) 11664 // result: (MOVLf2i val) 11665 for { 11666 off := auxIntToInt32(v.AuxInt) 11667 sym := auxToSym(v.Aux) 11668 ptr := v_0 11669 if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { 11670 break 11671 } 11672 val := v_1.Args[1] 11673 if ptr != v_1.Args[0] { 11674 break 11675 } 11676 v.reset(OpAMD64MOVLf2i) 11677 v.AddArg(val) 11678 return true 11679 } 11680 // match: (MOVLload [off] {sym} (SB) _) 11681 // cond: symIsRO(sym) 11682 // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) 11683 for { 11684 off := auxIntToInt32(v.AuxInt) 11685 sym := auxToSym(v.Aux) 11686 if v_0.Op != OpSB || !(symIsRO(sym)) { 11687 break 11688 } 11689 v.reset(OpAMD64MOVQconst) 11690 v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) 11691 return true 11692 } 11693 return false 11694 } 11695 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { 11696 v_2 := v.Args[2] 11697 v_1 := v.Args[1] 11698 v_0 := v.Args[0] 11699 b := v.Block 11700 typ := &b.Func.Config.Types 11701 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 11702 // result: (MOVLstore [off] {sym} ptr x mem) 11703 for { 11704 off := auxIntToInt32(v.AuxInt) 11705 sym := auxToSym(v.Aux) 11706 ptr := v_0 11707 if v_1.Op != OpAMD64MOVLQSX { 11708 break 11709 } 11710 x := v_1.Args[0] 11711 mem := v_2 11712 v.reset(OpAMD64MOVLstore) 11713 v.AuxInt = int32ToAuxInt(off) 11714 
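// Editorial note, not generated output: MOVLstore writes only the low 32 bits
// of its data argument, so a preceding sign or zero extension is dead. This
// rule and the next therefore rewrite, for example,
// (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem).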
v.Aux = symToAux(sym) 11715 v.AddArg3(ptr, x, mem) 11716 return true 11717 } 11718 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 11719 // result: (MOVLstore [off] {sym} ptr x mem) 11720 for { 11721 off := auxIntToInt32(v.AuxInt) 11722 sym := auxToSym(v.Aux) 11723 ptr := v_0 11724 if v_1.Op != OpAMD64MOVLQZX { 11725 break 11726 } 11727 x := v_1.Args[0] 11728 mem := v_2 11729 v.reset(OpAMD64MOVLstore) 11730 v.AuxInt = int32ToAuxInt(off) 11731 v.Aux = symToAux(sym) 11732 v.AddArg3(ptr, x, mem) 11733 return true 11734 } 11735 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 11736 // cond: is32Bit(int64(off1)+int64(off2)) 11737 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 11738 for { 11739 off1 := auxIntToInt32(v.AuxInt) 11740 sym := auxToSym(v.Aux) 11741 if v_0.Op != OpAMD64ADDQconst { 11742 break 11743 } 11744 off2 := auxIntToInt32(v_0.AuxInt) 11745 ptr := v_0.Args[0] 11746 val := v_1 11747 mem := v_2 11748 if !(is32Bit(int64(off1) + int64(off2))) { 11749 break 11750 } 11751 v.reset(OpAMD64MOVLstore) 11752 v.AuxInt = int32ToAuxInt(off1 + off2) 11753 v.Aux = symToAux(sym) 11754 v.AddArg3(ptr, val, mem) 11755 return true 11756 } 11757 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 11758 // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) 11759 for { 11760 off := auxIntToInt32(v.AuxInt) 11761 sym := auxToSym(v.Aux) 11762 ptr := v_0 11763 if v_1.Op != OpAMD64MOVLconst { 11764 break 11765 } 11766 c := auxIntToInt32(v_1.AuxInt) 11767 mem := v_2 11768 v.reset(OpAMD64MOVLstoreconst) 11769 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11770 v.Aux = symToAux(sym) 11771 v.AddArg2(ptr, mem) 11772 return true 11773 } 11774 // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) 11775 // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) 11776 for { 11777 off := auxIntToInt32(v.AuxInt) 11778 sym := auxToSym(v.Aux) 11779 ptr := v_0 11780 if v_1.Op != OpAMD64MOVQconst { 11781 break 11782 } 11783 c := auxIntToInt64(v_1.AuxInt) 11784 mem := v_2 11785 v.reset(OpAMD64MOVLstoreconst) 11786 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11787 v.Aux = symToAux(sym) 11788 v.AddArg2(ptr, mem) 11789 return true 11790 } 11791 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11792 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 11793 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11794 for { 11795 off1 := auxIntToInt32(v.AuxInt) 11796 sym1 := auxToSym(v.Aux) 11797 if v_0.Op != OpAMD64LEAQ { 11798 break 11799 } 11800 off2 := auxIntToInt32(v_0.AuxInt) 11801 sym2 := auxToSym(v_0.Aux) 11802 base := v_0.Args[0] 11803 val := v_1 11804 mem := v_2 11805 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 11806 break 11807 } 11808 v.reset(OpAMD64MOVLstore) 11809 v.AuxInt = int32ToAuxInt(off1 + off2) 11810 v.Aux = symToAux(mergeSym(sym1, sym2)) 11811 v.AddArg3(base, val, mem) 11812 return true 11813 } 11814 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) 11815 // cond: x.Uses == 1 && clobber(x) 11816 // result: (MOVQstore [i-4] {s} p w mem) 11817 for { 11818 i := auxIntToInt32(v.AuxInt) 11819 s := auxToSym(v.Aux) 11820 p := v_0 11821 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 { 11822 break 11823 } 11824 w := v_1.Args[0] 11825 x := v_2 11826 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { 11827 break 11828 } 11829 mem := x.Args[2] 11830 if p != 
x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) { 11831 break 11832 } 11833 v.reset(OpAMD64MOVQstore) 11834 v.AuxInt = int32ToAuxInt(i - 4) 11835 v.Aux = symToAux(s) 11836 v.AddArg3(p, w, mem) 11837 return true 11838 } 11839 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 11840 // cond: x.Uses == 1 && clobber(x) 11841 // result: (MOVQstore [i-4] {s} p w0 mem) 11842 for { 11843 i := auxIntToInt32(v.AuxInt) 11844 s := auxToSym(v.Aux) 11845 p := v_0 11846 if v_1.Op != OpAMD64SHRQconst { 11847 break 11848 } 11849 j := auxIntToInt8(v_1.AuxInt) 11850 w := v_1.Args[0] 11851 x := v_2 11852 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { 11853 break 11854 } 11855 mem := x.Args[2] 11856 if p != x.Args[0] { 11857 break 11858 } 11859 w0 := x.Args[1] 11860 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { 11861 break 11862 } 11863 v.reset(OpAMD64MOVQstore) 11864 v.AuxInt = int32ToAuxInt(i - 4) 11865 v.Aux = symToAux(s) 11866 v.AddArg3(p, w0, mem) 11867 return true 11868 } 11869 // match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem)) 11870 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x) 11871 // result: (MOVQstore [i] {s} p0 w mem) 11872 for { 11873 i := auxIntToInt32(v.AuxInt) 11874 s := auxToSym(v.Aux) 11875 p1 := v_0 11876 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 { 11877 break 11878 } 11879 w := v_1.Args[0] 11880 x := v_2 11881 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { 11882 break 11883 } 11884 mem := x.Args[2] 11885 p0 := x.Args[0] 11886 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) { 11887 break 11888 } 11889 v.reset(OpAMD64MOVQstore) 11890 v.AuxInt = int32ToAuxInt(i) 11891 v.Aux = symToAux(s) 11892 v.AddArg3(p0, w, mem) 11893 return true 11894 } 11895 // match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem)) 11896 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x) 11897 // result: (MOVQstore [i] {s} p0 w0 mem) 11898 for { 11899 i := auxIntToInt32(v.AuxInt) 11900 s := auxToSym(v.Aux) 11901 p1 := v_0 11902 if v_1.Op != OpAMD64SHRQconst { 11903 break 11904 } 11905 j := auxIntToInt8(v_1.AuxInt) 11906 w := v_1.Args[0] 11907 x := v_2 11908 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { 11909 break 11910 } 11911 mem := x.Args[2] 11912 p0 := x.Args[0] 11913 w0 := x.Args[1] 11914 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) { 11915 break 11916 } 11917 v.reset(OpAMD64MOVQstore) 11918 v.AuxInt = int32ToAuxInt(i) 11919 v.Aux = symToAux(s) 11920 v.AddArg3(p0, w0, mem) 11921 return true 11922 } 11923 // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) 11924 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2) 11925 // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) 11926 for { 11927 i := auxIntToInt32(v.AuxInt) 11928 s := auxToSym(v.Aux) 11929 p := v_0 11930 x1 := v_1 11931 if x1.Op != OpAMD64MOVLload { 11932 break 11933 } 11934 j := auxIntToInt32(x1.AuxInt) 11935 s2 := auxToSym(x1.Aux) 11936 mem := x1.Args[1] 11937 p2 := x1.Args[0] 11938 mem2 := v_2 11939 if mem2.Op 
!= OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s { 11940 break 11941 } 11942 _ = mem2.Args[2] 11943 if p != mem2.Args[0] { 11944 break 11945 } 11946 x2 := mem2.Args[1] 11947 if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 { 11948 break 11949 } 11950 _ = x2.Args[1] 11951 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) { 11952 break 11953 } 11954 v.reset(OpAMD64MOVQstore) 11955 v.AuxInt = int32ToAuxInt(i - 4) 11956 v.Aux = symToAux(s) 11957 v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64) 11958 v0.AuxInt = int32ToAuxInt(j - 4) 11959 v0.Aux = symToAux(s2) 11960 v0.AddArg2(p2, mem) 11961 v.AddArg3(p, v0, mem) 11962 return true 11963 } 11964 // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) 11965 // cond: y.Uses==1 && clobber(y) 11966 // result: (ADDLmodify [off] {sym} ptr x mem) 11967 for { 11968 off := auxIntToInt32(v.AuxInt) 11969 sym := auxToSym(v.Aux) 11970 ptr := v_0 11971 y := v_1 11972 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 11973 break 11974 } 11975 mem := y.Args[2] 11976 x := y.Args[0] 11977 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 11978 break 11979 } 11980 v.reset(OpAMD64ADDLmodify) 11981 v.AuxInt = int32ToAuxInt(off) 11982 v.Aux = symToAux(sym) 11983 v.AddArg3(ptr, x, mem) 11984 return true 11985 } 11986 // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) 11987 // cond: y.Uses==1 && clobber(y) 11988 // result: (ANDLmodify [off] {sym} ptr x mem) 11989 for { 11990 off := auxIntToInt32(v.AuxInt) 11991 sym := auxToSym(v.Aux) 11992 ptr := v_0 11993 y := v_1 11994 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 11995 break 11996 } 11997 mem := y.Args[2] 11998 x := y.Args[0] 11999 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 12000 break 12001 } 12002 v.reset(OpAMD64ANDLmodify) 12003 v.AuxInt = int32ToAuxInt(off) 12004 v.Aux = symToAux(sym) 12005 v.AddArg3(ptr, x, mem) 12006 return true 12007 } 12008 // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) 12009 // cond: y.Uses==1 && clobber(y) 12010 // result: (ORLmodify [off] {sym} ptr x mem) 12011 for { 12012 off := auxIntToInt32(v.AuxInt) 12013 sym := auxToSym(v.Aux) 12014 ptr := v_0 12015 y := v_1 12016 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 12017 break 12018 } 12019 mem := y.Args[2] 12020 x := y.Args[0] 12021 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 12022 break 12023 } 12024 v.reset(OpAMD64ORLmodify) 12025 v.AuxInt = int32ToAuxInt(off) 12026 v.Aux = symToAux(sym) 12027 v.AddArg3(ptr, x, mem) 12028 return true 12029 } 12030 // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) 12031 // cond: y.Uses==1 && clobber(y) 12032 // result: (XORLmodify [off] {sym} ptr x mem) 12033 for { 12034 off := auxIntToInt32(v.AuxInt) 12035 sym := auxToSym(v.Aux) 12036 ptr := v_0 12037 y := v_1 12038 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 12039 break 12040 } 12041 mem := y.Args[2] 12042 x := y.Args[0] 12043 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 12044 break 12045 } 12046 v.reset(OpAMD64XORLmodify) 12047 v.AuxInt = int32ToAuxInt(off) 12048 v.Aux = symToAux(sym) 12049 v.AddArg3(ptr, x, mem) 12050 return 
true 12051 } 12052 // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) 12053 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 12054 // result: (ADDLmodify [off] {sym} ptr x mem) 12055 for { 12056 off := auxIntToInt32(v.AuxInt) 12057 sym := auxToSym(v.Aux) 12058 ptr := v_0 12059 y := v_1 12060 if y.Op != OpAMD64ADDL { 12061 break 12062 } 12063 _ = y.Args[1] 12064 y_0 := y.Args[0] 12065 y_1 := y.Args[1] 12066 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 12067 l := y_0 12068 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12069 continue 12070 } 12071 mem := l.Args[1] 12072 if ptr != l.Args[0] { 12073 continue 12074 } 12075 x := y_1 12076 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 12077 continue 12078 } 12079 v.reset(OpAMD64ADDLmodify) 12080 v.AuxInt = int32ToAuxInt(off) 12081 v.Aux = symToAux(sym) 12082 v.AddArg3(ptr, x, mem) 12083 return true 12084 } 12085 break 12086 } 12087 // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) 12088 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 12089 // result: (SUBLmodify [off] {sym} ptr x mem) 12090 for { 12091 off := auxIntToInt32(v.AuxInt) 12092 sym := auxToSym(v.Aux) 12093 ptr := v_0 12094 y := v_1 12095 if y.Op != OpAMD64SUBL { 12096 break 12097 } 12098 x := y.Args[1] 12099 l := y.Args[0] 12100 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12101 break 12102 } 12103 mem := l.Args[1] 12104 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 12105 break 12106 } 12107 v.reset(OpAMD64SUBLmodify) 12108 v.AuxInt = int32ToAuxInt(off) 12109 v.Aux = symToAux(sym) 12110 v.AddArg3(ptr, x, mem) 12111 return true 12112 } 12113 // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) 12114 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 12115 // result: (ANDLmodify [off] {sym} ptr x mem) 12116 for { 12117 off := auxIntToInt32(v.AuxInt) 12118 sym := auxToSym(v.Aux) 12119 ptr := v_0 12120 y := v_1 12121 if y.Op != OpAMD64ANDL { 12122 break 12123 } 12124 _ = y.Args[1] 12125 y_0 := y.Args[0] 12126 y_1 := y.Args[1] 12127 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 12128 l := y_0 12129 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12130 continue 12131 } 12132 mem := l.Args[1] 12133 if ptr != l.Args[0] { 12134 continue 12135 } 12136 x := y_1 12137 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 12138 continue 12139 } 12140 v.reset(OpAMD64ANDLmodify) 12141 v.AuxInt = int32ToAuxInt(off) 12142 v.Aux = symToAux(sym) 12143 v.AddArg3(ptr, x, mem) 12144 return true 12145 } 12146 break 12147 } 12148 // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) 12149 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 12150 // result: (ORLmodify [off] {sym} ptr x mem) 12151 for { 12152 off := auxIntToInt32(v.AuxInt) 12153 sym := auxToSym(v.Aux) 12154 ptr := v_0 12155 y := v_1 12156 if y.Op != OpAMD64ORL { 12157 break 12158 } 12159 _ = y.Args[1] 12160 y_0 := y.Args[0] 12161 y_1 := y.Args[1] 12162 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 12163 l := y_0 12164 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12165 continue 12166 } 12167 mem := l.Args[1] 12168 if ptr != l.Args[0] { 12169 continue 12170 } 12171 x := y_1 12172 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) 
{ 12173 continue 12174 } 12175 v.reset(OpAMD64ORLmodify) 12176 v.AuxInt = int32ToAuxInt(off) 12177 v.Aux = symToAux(sym) 12178 v.AddArg3(ptr, x, mem) 12179 return true 12180 } 12181 break 12182 } 12183 // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) 12184 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 12185 // result: (XORLmodify [off] {sym} ptr x mem) 12186 for { 12187 off := auxIntToInt32(v.AuxInt) 12188 sym := auxToSym(v.Aux) 12189 ptr := v_0 12190 y := v_1 12191 if y.Op != OpAMD64XORL { 12192 break 12193 } 12194 _ = y.Args[1] 12195 y_0 := y.Args[0] 12196 y_1 := y.Args[1] 12197 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 12198 l := y_0 12199 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12200 continue 12201 } 12202 mem := l.Args[1] 12203 if ptr != l.Args[0] { 12204 continue 12205 } 12206 x := y_1 12207 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 12208 continue 12209 } 12210 v.reset(OpAMD64XORLmodify) 12211 v.AuxInt = int32ToAuxInt(off) 12212 v.Aux = symToAux(sym) 12213 v.AddArg3(ptr, x, mem) 12214 return true 12215 } 12216 break 12217 } 12218 // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 12219 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 12220 // result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 12221 for { 12222 off := auxIntToInt32(v.AuxInt) 12223 sym := auxToSym(v.Aux) 12224 ptr := v_0 12225 a := v_1 12226 if a.Op != OpAMD64ADDLconst { 12227 break 12228 } 12229 c := auxIntToInt32(a.AuxInt) 12230 l := a.Args[0] 12231 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12232 break 12233 } 12234 mem := l.Args[1] 12235 ptr2 := l.Args[0] 12236 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 12237 break 12238 } 12239 v.reset(OpAMD64ADDLconstmodify) 12240 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12241 v.Aux = symToAux(sym) 12242 v.AddArg2(ptr, mem) 12243 return true 12244 } 12245 // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 12246 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 12247 // result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 12248 for { 12249 off := auxIntToInt32(v.AuxInt) 12250 sym := auxToSym(v.Aux) 12251 ptr := v_0 12252 a := v_1 12253 if a.Op != OpAMD64ANDLconst { 12254 break 12255 } 12256 c := auxIntToInt32(a.AuxInt) 12257 l := a.Args[0] 12258 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12259 break 12260 } 12261 mem := l.Args[1] 12262 ptr2 := l.Args[0] 12263 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 12264 break 12265 } 12266 v.reset(OpAMD64ANDLconstmodify) 12267 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12268 v.Aux = symToAux(sym) 12269 v.AddArg2(ptr, mem) 12270 return true 12271 } 12272 // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 12273 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 12274 // result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 12275 for { 12276 off := auxIntToInt32(v.AuxInt) 12277 sym := auxToSym(v.Aux) 12278 ptr := v_0 12279 a := v_1 12280 if a.Op != OpAMD64ORLconst { 12281 break 12282 } 12283 c := auxIntToInt32(a.AuxInt) 12284 l := a.Args[0] 12285 if 
l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12286 break 12287 } 12288 mem := l.Args[1] 12289 ptr2 := l.Args[0] 12290 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 12291 break 12292 } 12293 v.reset(OpAMD64ORLconstmodify) 12294 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12295 v.Aux = symToAux(sym) 12296 v.AddArg2(ptr, mem) 12297 return true 12298 } 12299 // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 12300 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 12301 // result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 12302 for { 12303 off := auxIntToInt32(v.AuxInt) 12304 sym := auxToSym(v.Aux) 12305 ptr := v_0 12306 a := v_1 12307 if a.Op != OpAMD64XORLconst { 12308 break 12309 } 12310 c := auxIntToInt32(a.AuxInt) 12311 l := a.Args[0] 12312 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12313 break 12314 } 12315 mem := l.Args[1] 12316 ptr2 := l.Args[0] 12317 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 12318 break 12319 } 12320 v.reset(OpAMD64XORLconstmodify) 12321 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12322 v.Aux = symToAux(sym) 12323 v.AddArg2(ptr, mem) 12324 return true 12325 } 12326 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) 12327 // result: (MOVSSstore [off] {sym} ptr val mem) 12328 for { 12329 off := auxIntToInt32(v.AuxInt) 12330 sym := auxToSym(v.Aux) 12331 ptr := v_0 12332 if v_1.Op != OpAMD64MOVLf2i { 12333 break 12334 } 12335 val := v_1.Args[0] 12336 mem := v_2 12337 v.reset(OpAMD64MOVSSstore) 12338 v.AuxInt = int32ToAuxInt(off) 12339 v.Aux = symToAux(sym) 12340 v.AddArg3(ptr, val, mem) 12341 return true 12342 } 12343 // match: (MOVLstore [i] {s} p x:(BSWAPL w) mem) 12344 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 12345 // result: (MOVBELstore [i] {s} p w mem) 12346 for { 12347 i := auxIntToInt32(v.AuxInt) 12348 s := auxToSym(v.Aux) 12349 p := v_0 12350 x := v_1 12351 if x.Op != OpAMD64BSWAPL { 12352 break 12353 } 12354 w := x.Args[0] 12355 mem := v_2 12356 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { 12357 break 12358 } 12359 v.reset(OpAMD64MOVBELstore) 12360 v.AuxInt = int32ToAuxInt(i) 12361 v.Aux = symToAux(s) 12362 v.AddArg3(p, w, mem) 12363 return true 12364 } 12365 return false 12366 } 12367 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { 12368 v_1 := v.Args[1] 12369 v_0 := v.Args[0] 12370 b := v.Block 12371 typ := &b.Func.Config.Types 12372 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 12373 // cond: ValAndOff(sc).canAdd32(off) 12374 // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) 12375 for { 12376 sc := auxIntToValAndOff(v.AuxInt) 12377 s := auxToSym(v.Aux) 12378 if v_0.Op != OpAMD64ADDQconst { 12379 break 12380 } 12381 off := auxIntToInt32(v_0.AuxInt) 12382 ptr := v_0.Args[0] 12383 mem := v_1 12384 if !(ValAndOff(sc).canAdd32(off)) { 12385 break 12386 } 12387 v.reset(OpAMD64MOVLstoreconst) 12388 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 12389 v.Aux = symToAux(s) 12390 v.AddArg2(ptr, mem) 12391 return true 12392 } 12393 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 12394 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) 12395 // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) 12396 for { 12397 sc := 
auxIntToValAndOff(v.AuxInt) 12398 sym1 := auxToSym(v.Aux) 12399 if v_0.Op != OpAMD64LEAQ { 12400 break 12401 } 12402 off := auxIntToInt32(v_0.AuxInt) 12403 sym2 := auxToSym(v_0.Aux) 12404 ptr := v_0.Args[0] 12405 mem := v_1 12406 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { 12407 break 12408 } 12409 v.reset(OpAMD64MOVLstoreconst) 12410 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 12411 v.Aux = symToAux(mergeSym(sym1, sym2)) 12412 v.AddArg2(ptr, mem) 12413 return true 12414 } 12415 // match: (MOVLstoreconst [c] {s} p1 x:(MOVLstoreconst [a] {s} p0 mem)) 12416 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+4-c.Off())) && clobber(x) 12417 // result: (MOVQstore [a.Off()] {s} p0 (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem) 12418 for { 12419 c := auxIntToValAndOff(v.AuxInt) 12420 s := auxToSym(v.Aux) 12421 p1 := v_0 12422 x := v_1 12423 if x.Op != OpAMD64MOVLstoreconst { 12424 break 12425 } 12426 a := auxIntToValAndOff(x.AuxInt) 12427 if auxToSym(x.Aux) != s { 12428 break 12429 } 12430 mem := x.Args[1] 12431 p0 := x.Args[0] 12432 if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+4-c.Off())) && clobber(x)) { 12433 break 12434 } 12435 v.reset(OpAMD64MOVQstore) 12436 v.AuxInt = int32ToAuxInt(a.Off()) 12437 v.Aux = symToAux(s) 12438 v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) 12439 v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32) 12440 v.AddArg3(p0, v0, mem) 12441 return true 12442 } 12443 // match: (MOVLstoreconst [a] {s} p0 x:(MOVLstoreconst [c] {s} p1 mem)) 12444 // cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+4-c.Off())) && clobber(x) 12445 // result: (MOVQstore [a.Off()] {s} p0 (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem) 12446 for { 12447 a := auxIntToValAndOff(v.AuxInt) 12448 s := auxToSym(v.Aux) 12449 p0 := v_0 12450 x := v_1 12451 if x.Op != OpAMD64MOVLstoreconst { 12452 break 12453 } 12454 c := auxIntToValAndOff(x.AuxInt) 12455 if auxToSym(x.Aux) != s { 12456 break 12457 } 12458 mem := x.Args[1] 12459 p1 := x.Args[0] 12460 if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+4-c.Off())) && clobber(x)) { 12461 break 12462 } 12463 v.reset(OpAMD64MOVQstore) 12464 v.AuxInt = int32ToAuxInt(a.Off()) 12465 v.Aux = symToAux(s) 12466 v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) 12467 v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32) 12468 v.AddArg3(p0, v0, mem) 12469 return true 12470 } 12471 return false 12472 } 12473 func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { 12474 v_1 := v.Args[1] 12475 v_0 := v.Args[0] 12476 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 12477 // cond: is32Bit(int64(off1)+int64(off2)) 12478 // result: (MOVOload [off1+off2] {sym} ptr mem) 12479 for { 12480 off1 := auxIntToInt32(v.AuxInt) 12481 sym := auxToSym(v.Aux) 12482 if v_0.Op != OpAMD64ADDQconst { 12483 break 12484 } 12485 off2 := auxIntToInt32(v_0.AuxInt) 12486 ptr := v_0.Args[0] 12487 mem := v_1 12488 if !(is32Bit(int64(off1) + int64(off2))) { 12489 break 12490 } 12491 v.reset(OpAMD64MOVOload) 12492 v.AuxInt = int32ToAuxInt(off1 + off2) 12493 v.Aux = symToAux(sym) 12494 v.AddArg2(ptr, mem) 12495 return true 12496 } 12497 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12498 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12499 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12500 for { 12501 off1 := auxIntToInt32(v.AuxInt) 12502 sym1 := auxToSym(v.Aux) 12503 if v_0.Op != 
OpAMD64LEAQ { 12504 break 12505 } 12506 off2 := auxIntToInt32(v_0.AuxInt) 12507 sym2 := auxToSym(v_0.Aux) 12508 base := v_0.Args[0] 12509 mem := v_1 12510 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12511 break 12512 } 12513 v.reset(OpAMD64MOVOload) 12514 v.AuxInt = int32ToAuxInt(off1 + off2) 12515 v.Aux = symToAux(mergeSym(sym1, sym2)) 12516 v.AddArg2(base, mem) 12517 return true 12518 } 12519 return false 12520 } 12521 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { 12522 v_2 := v.Args[2] 12523 v_1 := v.Args[1] 12524 v_0 := v.Args[0] 12525 b := v.Block 12526 config := b.Func.Config 12527 typ := &b.Func.Config.Types 12528 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 12529 // cond: is32Bit(int64(off1)+int64(off2)) 12530 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 12531 for { 12532 off1 := auxIntToInt32(v.AuxInt) 12533 sym := auxToSym(v.Aux) 12534 if v_0.Op != OpAMD64ADDQconst { 12535 break 12536 } 12537 off2 := auxIntToInt32(v_0.AuxInt) 12538 ptr := v_0.Args[0] 12539 val := v_1 12540 mem := v_2 12541 if !(is32Bit(int64(off1) + int64(off2))) { 12542 break 12543 } 12544 v.reset(OpAMD64MOVOstore) 12545 v.AuxInt = int32ToAuxInt(off1 + off2) 12546 v.Aux = symToAux(sym) 12547 v.AddArg3(ptr, val, mem) 12548 return true 12549 } 12550 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 12551 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12552 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 12553 for { 12554 off1 := auxIntToInt32(v.AuxInt) 12555 sym1 := auxToSym(v.Aux) 12556 if v_0.Op != OpAMD64LEAQ { 12557 break 12558 } 12559 off2 := auxIntToInt32(v_0.AuxInt) 12560 sym2 := auxToSym(v_0.Aux) 12561 base := v_0.Args[0] 12562 val := v_1 12563 mem := v_2 12564 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12565 break 12566 } 12567 v.reset(OpAMD64MOVOstore) 12568 v.AuxInt = int32ToAuxInt(off1 + off2) 12569 v.Aux = symToAux(mergeSym(sym1, sym2)) 12570 v.AddArg3(base, val, mem) 12571 return true 12572 } 12573 // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) 12574 // cond: symIsRO(srcSym) 12575 // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem)) 12576 for { 12577 dstOff := auxIntToInt32(v.AuxInt) 12578 dstSym := auxToSym(v.Aux) 12579 ptr := v_0 12580 if v_1.Op != OpAMD64MOVOload { 12581 break 12582 } 12583 srcOff := auxIntToInt32(v_1.AuxInt) 12584 srcSym := auxToSym(v_1.Aux) 12585 v_1_0 := v_1.Args[0] 12586 if v_1_0.Op != OpSB { 12587 break 12588 } 12589 mem := v_2 12590 if !(symIsRO(srcSym)) { 12591 break 12592 } 12593 v.reset(OpAMD64MOVQstore) 12594 v.AuxInt = int32ToAuxInt(dstOff + 8) 12595 v.Aux = symToAux(dstSym) 12596 v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) 12597 v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))) 12598 v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem) 12599 v1.AuxInt = int32ToAuxInt(dstOff) 12600 v1.Aux = symToAux(dstSym) 12601 v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) 12602 v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))) 12603 v1.AddArg3(ptr, v2, mem) 12604 v.AddArg3(ptr, v0, v1) 12605 return true 12606 } 12607 return false 12608 } 12609 func rewriteValueAMD64_OpAMD64MOVOstoreconst(v 
*Value) bool { 12610 v_1 := v.Args[1] 12611 v_0 := v.Args[0] 12612 // match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 12613 // cond: ValAndOff(sc).canAdd32(off) 12614 // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) 12615 for { 12616 sc := auxIntToValAndOff(v.AuxInt) 12617 s := auxToSym(v.Aux) 12618 if v_0.Op != OpAMD64ADDQconst { 12619 break 12620 } 12621 off := auxIntToInt32(v_0.AuxInt) 12622 ptr := v_0.Args[0] 12623 mem := v_1 12624 if !(ValAndOff(sc).canAdd32(off)) { 12625 break 12626 } 12627 v.reset(OpAMD64MOVOstoreconst) 12628 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 12629 v.Aux = symToAux(s) 12630 v.AddArg2(ptr, mem) 12631 return true 12632 } 12633 // match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 12634 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) 12635 // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) 12636 for { 12637 sc := auxIntToValAndOff(v.AuxInt) 12638 sym1 := auxToSym(v.Aux) 12639 if v_0.Op != OpAMD64LEAQ { 12640 break 12641 } 12642 off := auxIntToInt32(v_0.AuxInt) 12643 sym2 := auxToSym(v_0.Aux) 12644 ptr := v_0.Args[0] 12645 mem := v_1 12646 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { 12647 break 12648 } 12649 v.reset(OpAMD64MOVOstoreconst) 12650 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 12651 v.Aux = symToAux(mergeSym(sym1, sym2)) 12652 v.AddArg2(ptr, mem) 12653 return true 12654 } 12655 return false 12656 } 12657 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { 12658 v_1 := v.Args[1] 12659 v_0 := v.Args[0] 12660 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 12661 // cond: is32Bit(int64(off1)+int64(off2)) 12662 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 12663 for { 12664 off1 := auxIntToInt32(v.AuxInt) 12665 sym := auxToSym(v.Aux) 12666 if v_0.Op != OpAMD64ADDQconst { 12667 break 12668 } 12669 off2 := auxIntToInt32(v_0.AuxInt) 12670 ptr := v_0.Args[0] 12671 mem := v_1 12672 if !(is32Bit(int64(off1) + int64(off2))) { 12673 break 12674 } 12675 v.reset(OpAMD64MOVQatomicload) 12676 v.AuxInt = int32ToAuxInt(off1 + off2) 12677 v.Aux = symToAux(sym) 12678 v.AddArg2(ptr, mem) 12679 return true 12680 } 12681 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 12682 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12683 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) 12684 for { 12685 off1 := auxIntToInt32(v.AuxInt) 12686 sym1 := auxToSym(v.Aux) 12687 if v_0.Op != OpAMD64LEAQ { 12688 break 12689 } 12690 off2 := auxIntToInt32(v_0.AuxInt) 12691 sym2 := auxToSym(v_0.Aux) 12692 ptr := v_0.Args[0] 12693 mem := v_1 12694 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12695 break 12696 } 12697 v.reset(OpAMD64MOVQatomicload) 12698 v.AuxInt = int32ToAuxInt(off1 + off2) 12699 v.Aux = symToAux(mergeSym(sym1, sym2)) 12700 v.AddArg2(ptr, mem) 12701 return true 12702 } 12703 return false 12704 } 12705 func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool { 12706 v_0 := v.Args[0] 12707 b := v.Block 12708 // match: (MOVQf2i <t> (Arg <u> [off] {sym})) 12709 // cond: t.Size() == u.Size() 12710 // result: @b.Func.Entry (Arg <t> [off] {sym}) 12711 for { 12712 t := v.Type 12713 if v_0.Op != OpArg { 12714 break 12715 } 12716 u := v_0.Type 12717 off := auxIntToInt32(v_0.AuxInt) 12718 sym := auxToSym(v_0.Aux) 12719 if !(t.Size() == u.Size()) { 12720 break 12721 } 12722 b = b.Func.Entry 12723 v0 := 
b.NewValue0(v.Pos, OpArg, t) 12724 v.copyOf(v0) 12725 v0.AuxInt = int32ToAuxInt(off) 12726 v0.Aux = symToAux(sym) 12727 return true 12728 } 12729 return false 12730 } 12731 func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool { 12732 v_0 := v.Args[0] 12733 b := v.Block 12734 // match: (MOVQi2f <t> (Arg <u> [off] {sym})) 12735 // cond: t.Size() == u.Size() 12736 // result: @b.Func.Entry (Arg <t> [off] {sym}) 12737 for { 12738 t := v.Type 12739 if v_0.Op != OpArg { 12740 break 12741 } 12742 u := v_0.Type 12743 off := auxIntToInt32(v_0.AuxInt) 12744 sym := auxToSym(v_0.Aux) 12745 if !(t.Size() == u.Size()) { 12746 break 12747 } 12748 b = b.Func.Entry 12749 v0 := b.NewValue0(v.Pos, OpArg, t) 12750 v.copyOf(v0) 12751 v0.AuxInt = int32ToAuxInt(off) 12752 v0.Aux = symToAux(sym) 12753 return true 12754 } 12755 return false 12756 } 12757 func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { 12758 v_1 := v.Args[1] 12759 v_0 := v.Args[0] 12760 b := v.Block 12761 config := b.Func.Config 12762 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 12763 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12764 // result: x 12765 for { 12766 off := auxIntToInt32(v.AuxInt) 12767 sym := auxToSym(v.Aux) 12768 ptr := v_0 12769 if v_1.Op != OpAMD64MOVQstore { 12770 break 12771 } 12772 off2 := auxIntToInt32(v_1.AuxInt) 12773 sym2 := auxToSym(v_1.Aux) 12774 x := v_1.Args[1] 12775 ptr2 := v_1.Args[0] 12776 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12777 break 12778 } 12779 v.copyOf(x) 12780 return true 12781 } 12782 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 12783 // cond: is32Bit(int64(off1)+int64(off2)) 12784 // result: (MOVQload [off1+off2] {sym} ptr mem) 12785 for { 12786 off1 := auxIntToInt32(v.AuxInt) 12787 sym := auxToSym(v.Aux) 12788 if v_0.Op != OpAMD64ADDQconst { 12789 break 12790 } 12791 off2 := auxIntToInt32(v_0.AuxInt) 12792 ptr := v_0.Args[0] 12793 mem := v_1 12794 if !(is32Bit(int64(off1) + int64(off2))) { 12795 break 12796 } 12797 v.reset(OpAMD64MOVQload) 12798 v.AuxInt = int32ToAuxInt(off1 + off2) 12799 v.Aux = symToAux(sym) 12800 v.AddArg2(ptr, mem) 12801 return true 12802 } 12803 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12804 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12805 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12806 for { 12807 off1 := auxIntToInt32(v.AuxInt) 12808 sym1 := auxToSym(v.Aux) 12809 if v_0.Op != OpAMD64LEAQ { 12810 break 12811 } 12812 off2 := auxIntToInt32(v_0.AuxInt) 12813 sym2 := auxToSym(v_0.Aux) 12814 base := v_0.Args[0] 12815 mem := v_1 12816 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12817 break 12818 } 12819 v.reset(OpAMD64MOVQload) 12820 v.AuxInt = int32ToAuxInt(off1 + off2) 12821 v.Aux = symToAux(mergeSym(sym1, sym2)) 12822 v.AddArg2(base, mem) 12823 return true 12824 } 12825 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) 12826 // result: (MOVQf2i val) 12827 for { 12828 off := auxIntToInt32(v.AuxInt) 12829 sym := auxToSym(v.Aux) 12830 ptr := v_0 12831 if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { 12832 break 12833 } 12834 val := v_1.Args[1] 12835 if ptr != v_1.Args[0] { 12836 break 12837 } 12838 v.reset(OpAMD64MOVQf2i) 12839 v.AddArg(val) 12840 return true 12841 } 12842 // match: (MOVQload [off] {sym} (SB) _) 12843 // cond: symIsRO(sym) 12844 // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) 12845 for 
{ 12846 off := auxIntToInt32(v.AuxInt) 12847 sym := auxToSym(v.Aux) 12848 if v_0.Op != OpSB || !(symIsRO(sym)) { 12849 break 12850 } 12851 v.reset(OpAMD64MOVQconst) 12852 v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))) 12853 return true 12854 } 12855 return false 12856 } 12857 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { 12858 v_2 := v.Args[2] 12859 v_1 := v.Args[1] 12860 v_0 := v.Args[0] 12861 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 12862 // cond: is32Bit(int64(off1)+int64(off2)) 12863 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 12864 for { 12865 off1 := auxIntToInt32(v.AuxInt) 12866 sym := auxToSym(v.Aux) 12867 if v_0.Op != OpAMD64ADDQconst { 12868 break 12869 } 12870 off2 := auxIntToInt32(v_0.AuxInt) 12871 ptr := v_0.Args[0] 12872 val := v_1 12873 mem := v_2 12874 if !(is32Bit(int64(off1) + int64(off2))) { 12875 break 12876 } 12877 v.reset(OpAMD64MOVQstore) 12878 v.AuxInt = int32ToAuxInt(off1 + off2) 12879 v.Aux = symToAux(sym) 12880 v.AddArg3(ptr, val, mem) 12881 return true 12882 } 12883 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 12884 // cond: validVal(c) 12885 // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) 12886 for { 12887 off := auxIntToInt32(v.AuxInt) 12888 sym := auxToSym(v.Aux) 12889 ptr := v_0 12890 if v_1.Op != OpAMD64MOVQconst { 12891 break 12892 } 12893 c := auxIntToInt64(v_1.AuxInt) 12894 mem := v_2 12895 if !(validVal(c)) { 12896 break 12897 } 12898 v.reset(OpAMD64MOVQstoreconst) 12899 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12900 v.Aux = symToAux(sym) 12901 v.AddArg2(ptr, mem) 12902 return true 12903 } 12904 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 12905 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12906 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 12907 for { 12908 off1 := auxIntToInt32(v.AuxInt) 12909 sym1 := auxToSym(v.Aux) 12910 if v_0.Op != OpAMD64LEAQ { 12911 break 12912 } 12913 off2 := auxIntToInt32(v_0.AuxInt) 12914 sym2 := auxToSym(v_0.Aux) 12915 base := v_0.Args[0] 12916 val := v_1 12917 mem := v_2 12918 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12919 break 12920 } 12921 v.reset(OpAMD64MOVQstore) 12922 v.AuxInt = int32ToAuxInt(off1 + off2) 12923 v.Aux = symToAux(mergeSym(sym1, sym2)) 12924 v.AddArg3(base, val, mem) 12925 return true 12926 } 12927 // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem) 12928 // cond: y.Uses==1 && clobber(y) 12929 // result: (ADDQmodify [off] {sym} ptr x mem) 12930 for { 12931 off := auxIntToInt32(v.AuxInt) 12932 sym := auxToSym(v.Aux) 12933 ptr := v_0 12934 y := v_1 12935 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 12936 break 12937 } 12938 mem := y.Args[2] 12939 x := y.Args[0] 12940 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 12941 break 12942 } 12943 v.reset(OpAMD64ADDQmodify) 12944 v.AuxInt = int32ToAuxInt(off) 12945 v.Aux = symToAux(sym) 12946 v.AddArg3(ptr, x, mem) 12947 return true 12948 } 12949 // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) 12950 // cond: y.Uses==1 && clobber(y) 12951 // result: (ANDQmodify [off] {sym} ptr x mem) 12952 for { 12953 off := auxIntToInt32(v.AuxInt) 12954 sym := auxToSym(v.Aux) 12955 ptr := v_0 12956 y := v_1 12957 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 12958 break 12959 } 12960 mem 
:= y.Args[2] 12961 x := y.Args[0] 12962 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 12963 break 12964 } 12965 v.reset(OpAMD64ANDQmodify) 12966 v.AuxInt = int32ToAuxInt(off) 12967 v.Aux = symToAux(sym) 12968 v.AddArg3(ptr, x, mem) 12969 return true 12970 } 12971 // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) 12972 // cond: y.Uses==1 && clobber(y) 12973 // result: (ORQmodify [off] {sym} ptr x mem) 12974 for { 12975 off := auxIntToInt32(v.AuxInt) 12976 sym := auxToSym(v.Aux) 12977 ptr := v_0 12978 y := v_1 12979 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 12980 break 12981 } 12982 mem := y.Args[2] 12983 x := y.Args[0] 12984 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 12985 break 12986 } 12987 v.reset(OpAMD64ORQmodify) 12988 v.AuxInt = int32ToAuxInt(off) 12989 v.Aux = symToAux(sym) 12990 v.AddArg3(ptr, x, mem) 12991 return true 12992 } 12993 // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem) 12994 // cond: y.Uses==1 && clobber(y) 12995 // result: (XORQmodify [off] {sym} ptr x mem) 12996 for { 12997 off := auxIntToInt32(v.AuxInt) 12998 sym := auxToSym(v.Aux) 12999 ptr := v_0 13000 y := v_1 13001 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 13002 break 13003 } 13004 mem := y.Args[2] 13005 x := y.Args[0] 13006 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 13007 break 13008 } 13009 v.reset(OpAMD64XORQmodify) 13010 v.AuxInt = int32ToAuxInt(off) 13011 v.Aux = symToAux(sym) 13012 v.AddArg3(ptr, x, mem) 13013 return true 13014 } 13015 // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) 13016 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 13017 // result: (ADDQmodify [off] {sym} ptr x mem) 13018 for { 13019 off := auxIntToInt32(v.AuxInt) 13020 sym := auxToSym(v.Aux) 13021 ptr := v_0 13022 y := v_1 13023 if y.Op != OpAMD64ADDQ { 13024 break 13025 } 13026 _ = y.Args[1] 13027 y_0 := y.Args[0] 13028 y_1 := y.Args[1] 13029 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 13030 l := y_0 13031 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13032 continue 13033 } 13034 mem := l.Args[1] 13035 if ptr != l.Args[0] { 13036 continue 13037 } 13038 x := y_1 13039 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 13040 continue 13041 } 13042 v.reset(OpAMD64ADDQmodify) 13043 v.AuxInt = int32ToAuxInt(off) 13044 v.Aux = symToAux(sym) 13045 v.AddArg3(ptr, x, mem) 13046 return true 13047 } 13048 break 13049 } 13050 // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem) 13051 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 13052 // result: (SUBQmodify [off] {sym} ptr x mem) 13053 for { 13054 off := auxIntToInt32(v.AuxInt) 13055 sym := auxToSym(v.Aux) 13056 ptr := v_0 13057 y := v_1 13058 if y.Op != OpAMD64SUBQ { 13059 break 13060 } 13061 x := y.Args[1] 13062 l := y.Args[0] 13063 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13064 break 13065 } 13066 mem := l.Args[1] 13067 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 13068 break 13069 } 13070 v.reset(OpAMD64SUBQmodify) 13071 v.AuxInt = int32ToAuxInt(off) 13072 v.Aux = symToAux(sym) 13073 v.AddArg3(ptr, x, mem) 13074 return true 13075 } 13076 // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) 13077 // cond: y.Uses==1 
&& l.Uses==1 && clobber(y, l) 13078 // result: (ANDQmodify [off] {sym} ptr x mem) 13079 for { 13080 off := auxIntToInt32(v.AuxInt) 13081 sym := auxToSym(v.Aux) 13082 ptr := v_0 13083 y := v_1 13084 if y.Op != OpAMD64ANDQ { 13085 break 13086 } 13087 _ = y.Args[1] 13088 y_0 := y.Args[0] 13089 y_1 := y.Args[1] 13090 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 13091 l := y_0 13092 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13093 continue 13094 } 13095 mem := l.Args[1] 13096 if ptr != l.Args[0] { 13097 continue 13098 } 13099 x := y_1 13100 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 13101 continue 13102 } 13103 v.reset(OpAMD64ANDQmodify) 13104 v.AuxInt = int32ToAuxInt(off) 13105 v.Aux = symToAux(sym) 13106 v.AddArg3(ptr, x, mem) 13107 return true 13108 } 13109 break 13110 } 13111 // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem) 13112 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 13113 // result: (ORQmodify [off] {sym} ptr x mem) 13114 for { 13115 off := auxIntToInt32(v.AuxInt) 13116 sym := auxToSym(v.Aux) 13117 ptr := v_0 13118 y := v_1 13119 if y.Op != OpAMD64ORQ { 13120 break 13121 } 13122 _ = y.Args[1] 13123 y_0 := y.Args[0] 13124 y_1 := y.Args[1] 13125 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 13126 l := y_0 13127 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13128 continue 13129 } 13130 mem := l.Args[1] 13131 if ptr != l.Args[0] { 13132 continue 13133 } 13134 x := y_1 13135 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 13136 continue 13137 } 13138 v.reset(OpAMD64ORQmodify) 13139 v.AuxInt = int32ToAuxInt(off) 13140 v.Aux = symToAux(sym) 13141 v.AddArg3(ptr, x, mem) 13142 return true 13143 } 13144 break 13145 } 13146 // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem) 13147 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 13148 // result: (XORQmodify [off] {sym} ptr x mem) 13149 for { 13150 off := auxIntToInt32(v.AuxInt) 13151 sym := auxToSym(v.Aux) 13152 ptr := v_0 13153 y := v_1 13154 if y.Op != OpAMD64XORQ { 13155 break 13156 } 13157 _ = y.Args[1] 13158 y_0 := y.Args[0] 13159 y_1 := y.Args[1] 13160 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 13161 l := y_0 13162 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13163 continue 13164 } 13165 mem := l.Args[1] 13166 if ptr != l.Args[0] { 13167 continue 13168 } 13169 x := y_1 13170 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 13171 continue 13172 } 13173 v.reset(OpAMD64XORQmodify) 13174 v.AuxInt = int32ToAuxInt(off) 13175 v.Aux = symToAux(sym) 13176 v.AddArg3(ptr, x, mem) 13177 return true 13178 } 13179 break 13180 } 13181 // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 13182 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 13183 // result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 13184 for { 13185 off := auxIntToInt32(v.AuxInt) 13186 sym := auxToSym(v.Aux) 13187 ptr := v_0 13188 a := v_1 13189 if a.Op != OpAMD64ADDQconst { 13190 break 13191 } 13192 c := auxIntToInt32(a.AuxInt) 13193 l := a.Args[0] 13194 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13195 break 13196 } 13197 mem := l.Args[1] 13198 ptr2 := l.Args[0] 13199 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, 
a)) { 13200 break 13201 } 13202 v.reset(OpAMD64ADDQconstmodify) 13203 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 13204 v.Aux = symToAux(sym) 13205 v.AddArg2(ptr, mem) 13206 return true 13207 } 13208 // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 13209 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 13210 // result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 13211 for { 13212 off := auxIntToInt32(v.AuxInt) 13213 sym := auxToSym(v.Aux) 13214 ptr := v_0 13215 a := v_1 13216 if a.Op != OpAMD64ANDQconst { 13217 break 13218 } 13219 c := auxIntToInt32(a.AuxInt) 13220 l := a.Args[0] 13221 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13222 break 13223 } 13224 mem := l.Args[1] 13225 ptr2 := l.Args[0] 13226 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 13227 break 13228 } 13229 v.reset(OpAMD64ANDQconstmodify) 13230 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 13231 v.Aux = symToAux(sym) 13232 v.AddArg2(ptr, mem) 13233 return true 13234 } 13235 // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 13236 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 13237 // result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 13238 for { 13239 off := auxIntToInt32(v.AuxInt) 13240 sym := auxToSym(v.Aux) 13241 ptr := v_0 13242 a := v_1 13243 if a.Op != OpAMD64ORQconst { 13244 break 13245 } 13246 c := auxIntToInt32(a.AuxInt) 13247 l := a.Args[0] 13248 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13249 break 13250 } 13251 mem := l.Args[1] 13252 ptr2 := l.Args[0] 13253 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 13254 break 13255 } 13256 v.reset(OpAMD64ORQconstmodify) 13257 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 13258 v.Aux = symToAux(sym) 13259 v.AddArg2(ptr, mem) 13260 return true 13261 } 13262 // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 13263 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 13264 // result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 13265 for { 13266 off := auxIntToInt32(v.AuxInt) 13267 sym := auxToSym(v.Aux) 13268 ptr := v_0 13269 a := v_1 13270 if a.Op != OpAMD64XORQconst { 13271 break 13272 } 13273 c := auxIntToInt32(a.AuxInt) 13274 l := a.Args[0] 13275 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 13276 break 13277 } 13278 mem := l.Args[1] 13279 ptr2 := l.Args[0] 13280 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 13281 break 13282 } 13283 v.reset(OpAMD64XORQconstmodify) 13284 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 13285 v.Aux = symToAux(sym) 13286 v.AddArg2(ptr, mem) 13287 return true 13288 } 13289 // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) 13290 // result: (MOVSDstore [off] {sym} ptr val mem) 13291 for { 13292 off := auxIntToInt32(v.AuxInt) 13293 sym := auxToSym(v.Aux) 13294 ptr := v_0 13295 if v_1.Op != OpAMD64MOVQf2i { 13296 break 13297 } 13298 val := v_1.Args[0] 13299 mem := v_2 13300 v.reset(OpAMD64MOVSDstore) 13301 v.AuxInt = int32ToAuxInt(off) 13302 v.Aux = symToAux(sym) 13303 v.AddArg3(ptr, val, mem) 13304 return true 13305 } 13306 // match: (MOVQstore [i] {s} p x:(BSWAPQ w) 
mem) 13307 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 13308 // result: (MOVBEQstore [i] {s} p w mem) 13309 for { 13310 i := auxIntToInt32(v.AuxInt) 13311 s := auxToSym(v.Aux) 13312 p := v_0 13313 x := v_1 13314 if x.Op != OpAMD64BSWAPQ { 13315 break 13316 } 13317 w := x.Args[0] 13318 mem := v_2 13319 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { 13320 break 13321 } 13322 v.reset(OpAMD64MOVBEQstore) 13323 v.AuxInt = int32ToAuxInt(i) 13324 v.Aux = symToAux(s) 13325 v.AddArg3(p, w, mem) 13326 return true 13327 } 13328 return false 13329 } 13330 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { 13331 v_1 := v.Args[1] 13332 v_0 := v.Args[0] 13333 b := v.Block 13334 config := b.Func.Config 13335 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 13336 // cond: ValAndOff(sc).canAdd32(off) 13337 // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) 13338 for { 13339 sc := auxIntToValAndOff(v.AuxInt) 13340 s := auxToSym(v.Aux) 13341 if v_0.Op != OpAMD64ADDQconst { 13342 break 13343 } 13344 off := auxIntToInt32(v_0.AuxInt) 13345 ptr := v_0.Args[0] 13346 mem := v_1 13347 if !(ValAndOff(sc).canAdd32(off)) { 13348 break 13349 } 13350 v.reset(OpAMD64MOVQstoreconst) 13351 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 13352 v.Aux = symToAux(s) 13353 v.AddArg2(ptr, mem) 13354 return true 13355 } 13356 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 13357 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) 13358 // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) 13359 for { 13360 sc := auxIntToValAndOff(v.AuxInt) 13361 sym1 := auxToSym(v.Aux) 13362 if v_0.Op != OpAMD64LEAQ { 13363 break 13364 } 13365 off := auxIntToInt32(v_0.AuxInt) 13366 sym2 := auxToSym(v_0.Aux) 13367 ptr := v_0.Args[0] 13368 mem := v_1 13369 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { 13370 break 13371 } 13372 v.reset(OpAMD64MOVQstoreconst) 13373 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 13374 v.Aux = symToAux(mergeSym(sym1, sym2)) 13375 v.AddArg2(ptr, mem) 13376 return true 13377 } 13378 // match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem)) 13379 // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x) 13380 // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem) 13381 for { 13382 c := auxIntToValAndOff(v.AuxInt) 13383 s := auxToSym(v.Aux) 13384 p1 := v_0 13385 x := v_1 13386 if x.Op != OpAMD64MOVQstoreconst { 13387 break 13388 } 13389 a := auxIntToValAndOff(x.AuxInt) 13390 if auxToSym(x.Aux) != s { 13391 break 13392 } 13393 mem := x.Args[1] 13394 p0 := x.Args[0] 13395 if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)) { 13396 break 13397 } 13398 v.reset(OpAMD64MOVOstoreconst) 13399 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off())) 13400 v.Aux = symToAux(s) 13401 v.AddArg2(p0, mem) 13402 return true 13403 } 13404 // match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem)) 13405 // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x) 13406 // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem) 13407 for { 13408 a := auxIntToValAndOff(v.AuxInt) 13409 s := auxToSym(v.Aux) 13410 p0 := v_0 13411 x := v_1 13412 if x.Op != OpAMD64MOVQstoreconst { 13413 break 13414 } 
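// Editorial note, not generated output: this rule and its commutative twin
// above merge two adjacent 8-byte zero stores into one 16-byte
// MOVOstoreconst when SSE is usable (config.useSSE). sequentialAddresses
// checks that the two pointers are exactly 8 bytes apart (the
// a.Off()+8-c.Off() delta), and clobber(x) marks the inner store as dead so
// it can be removed once the combined store replaces it.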
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p1 := x.Args[0]
		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// result: (MOVQi2f val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// result: (MOVQstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// result: (MOVLi2f val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// result: (MOVLstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x)
	// cond: zeroUpper48Bits(x,3)
	// result: x
	for {
		x := v_0
		if !(zeroUpper48Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
	// result: (MOVLstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p1 (SHRQconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
	// result: (MOVLstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
	// result: (MOVLstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p1 (SHRQconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
	// result: (MOVLstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVWload {
			break
		}
		j := auxIntToInt32(x1.AuxInt)
		s2 := auxToSym(x1.Aux)
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(j - 2)
		v0.Aux = symToAux(s2)
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBEWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBEWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+2-c.Off())) && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		x := v_1
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p0 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+2-c.Off())) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	// match: (MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+2-c.Off())) && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		x := v_1
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p1 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+2-c.Off())) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULL x (MOVLconst [c]))
	// result: (MULLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64MULLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MULLconst [c] (MULLconst [d] x))
	// result: (MULLconst [c * d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [-9] x)
	// result: (NEGL (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -9 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-5] x)
	// result: (NEGL (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -5 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-3] x)
	// result: (NEGL (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -3 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-1] x)
	// result: (NEGL x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 0] _)
	// result: (MOVLconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (MULLconst [ 1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (MULLconst [ 3] x)
	// result: (LEAL2 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 3 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL2)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULLconst [ 5] x)
	// result: (LEAL4 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 5 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL4)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULLconst [ 7] x)
	// result: (LEAL2 x (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 7 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [ 9] x)
	// result: (LEAL8 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 9 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULLconst [11] x)
	// result: (LEAL2 x (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 11 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [13] x)
	// result: (LEAL4 x (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 13 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [19] x)
	// result: (LEAL2 x (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 19 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [21] x)
	// result: (LEAL4 x (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 21 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [25] x)
	// result: (LEAL8 x (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 25 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [27] x)
	// result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 27 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULLconst [37] x)
	// result: (LEAL4 x (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 37 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [41] x)
	// result: (LEAL8 x (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 41 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [45] x)
	// result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 45 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULLconst [73] x)
	// result: (LEAL8 x (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 73 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULLconst [81] x)
	// result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 81 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo64(int64(c)+1) && c >= 15
	// result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo32(c-1) && c >= 17
	// result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo32(c-2) && c >= 34
	// result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo32(c-4) && c >= 68
	// result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo32(c-8) && c >= 136
	// result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo32(c/3)
	// result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo32(c/5)
	// result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo32(c/9)
	// result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c * d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64MULQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(int64(c)*int64(d))
	// result: (MULQconst [c * d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) * int64(d))) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-9] x)
	// result: (NEGQ (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -9 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-5] x)
	// result: (NEGQ (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -5 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-3] x)
	// result: (NEGQ (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != -3 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-1] x)
	// result: (NEGQ x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [ 0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (MULQconst [ 1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (MULQconst [ 3] x)
	// result: (LEAQ2 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 3 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [ 5] x)
	// result: (LEAQ4 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 5 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [ 7] x)
	// result: (LEAQ2 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 7 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [ 9] x)
	// result: (LEAQ8 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 9 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [11] x)
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 11 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [13] x)
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 13 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [19] x)
	// result: (LEAQ2 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 19 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [21] x)
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 21 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [25] x)
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 25 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [27] x)
	// result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 27 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [37] x)
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 37 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [41] x)
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 41 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [45] x)
	// result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 45 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [73] x)
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 73 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [81] x)
	// result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 81 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo64(int64(c)+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo32(c/3)
	// result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo32(c/5)
	// result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo32(c/9)
	// result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) * d)
		return true
	}
	// match: (MULQconst [c] (NEGQ x))
	// cond: c != -(1<<31)
	// result: (MULQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
= symToAux(mergeSym(sym1, sym2)) 15546 v.AddArg3(val, base, mem) 15547 return true 15548 } 15549 // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 15550 // result: (MULSD x (MOVQi2f y)) 15551 for { 15552 off := auxIntToInt32(v.AuxInt) 15553 sym := auxToSym(v.Aux) 15554 x := v_0 15555 ptr := v_1 15556 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { 15557 break 15558 } 15559 y := v_2.Args[1] 15560 if ptr != v_2.Args[0] { 15561 break 15562 } 15563 v.reset(OpAMD64MULSD) 15564 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) 15565 v0.AddArg(y) 15566 v.AddArg2(x, v0) 15567 return true 15568 } 15569 return false 15570 } 15571 func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool { 15572 v_1 := v.Args[1] 15573 v_0 := v.Args[0] 15574 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) 15575 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 15576 // result: (MULSSload x [off] {sym} ptr mem) 15577 for { 15578 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 15579 x := v_0 15580 l := v_1 15581 if l.Op != OpAMD64MOVSSload { 15582 continue 15583 } 15584 off := auxIntToInt32(l.AuxInt) 15585 sym := auxToSym(l.Aux) 15586 mem := l.Args[1] 15587 ptr := l.Args[0] 15588 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 15589 continue 15590 } 15591 v.reset(OpAMD64MULSSload) 15592 v.AuxInt = int32ToAuxInt(off) 15593 v.Aux = symToAux(sym) 15594 v.AddArg3(x, ptr, mem) 15595 return true 15596 } 15597 break 15598 } 15599 return false 15600 } 15601 func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { 15602 v_2 := v.Args[2] 15603 v_1 := v.Args[1] 15604 v_0 := v.Args[0] 15605 b := v.Block 15606 typ := &b.Func.Config.Types 15607 // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem) 15608 // cond: is32Bit(int64(off1)+int64(off2)) 15609 // result: (MULSSload [off1+off2] {sym} val base mem) 15610 for { 15611 off1 := auxIntToInt32(v.AuxInt) 15612 sym := auxToSym(v.Aux) 15613 val := v_0 15614 if v_1.Op != OpAMD64ADDQconst { 15615 break 15616 } 15617 off2 := auxIntToInt32(v_1.AuxInt) 15618 base := v_1.Args[0] 15619 mem := v_2 15620 if !(is32Bit(int64(off1) + int64(off2))) { 15621 break 15622 } 15623 v.reset(OpAMD64MULSSload) 15624 v.AuxInt = int32ToAuxInt(off1 + off2) 15625 v.Aux = symToAux(sym) 15626 v.AddArg3(val, base, mem) 15627 return true 15628 } 15629 // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 15630 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 15631 // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 15632 for { 15633 off1 := auxIntToInt32(v.AuxInt) 15634 sym1 := auxToSym(v.Aux) 15635 val := v_0 15636 if v_1.Op != OpAMD64LEAQ { 15637 break 15638 } 15639 off2 := auxIntToInt32(v_1.AuxInt) 15640 sym2 := auxToSym(v_1.Aux) 15641 base := v_1.Args[0] 15642 mem := v_2 15643 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 15644 break 15645 } 15646 v.reset(OpAMD64MULSSload) 15647 v.AuxInt = int32ToAuxInt(off1 + off2) 15648 v.Aux = symToAux(mergeSym(sym1, sym2)) 15649 v.AddArg3(val, base, mem) 15650 return true 15651 } 15652 // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 15653 // result: (MULSS x (MOVLi2f y)) 15654 for { 15655 off := auxIntToInt32(v.AuxInt) 15656 sym := auxToSym(v.Aux) 15657 x := v_0 15658 ptr := v_1 15659 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { 15660 break 15661 } 15662 y := v_2.Args[1] 15663 if ptr != v_2.Args[0] { 15664 break 15665 } 15666 
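// Editorial note (illustrative sketch, not part of the generated output):
// this rule, like its MULSDload/MOVQstore counterpart above, performs
// store-to-load forwarding across register files. When the float operand
// would be loaded from the very address that the incoming memory state just
// stored a GP register to (same off, sym, and ptr), the load is dropped and
// the stored bits are reinterpreted in place:
//
//	(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
//	  => (MULSS x (MOVLi2f y))
//
// MOVLi2f moves the low 32 bits of the GP register y into an XMM register
// unchanged, so the multiply sees exactly the float32 bits the store wrote,
// with no memory round trip.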
v.reset(OpAMD64MULSS) 15667 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) 15668 v0.AddArg(y) 15669 v.AddArg2(x, v0) 15670 return true 15671 } 15672 return false 15673 } 15674 func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool { 15675 v_0 := v.Args[0] 15676 // match: (NEGL (NEGL x)) 15677 // result: x 15678 for { 15679 if v_0.Op != OpAMD64NEGL { 15680 break 15681 } 15682 x := v_0.Args[0] 15683 v.copyOf(x) 15684 return true 15685 } 15686 // match: (NEGL s:(SUBL x y)) 15687 // cond: s.Uses == 1 15688 // result: (SUBL y x) 15689 for { 15690 s := v_0 15691 if s.Op != OpAMD64SUBL { 15692 break 15693 } 15694 y := s.Args[1] 15695 x := s.Args[0] 15696 if !(s.Uses == 1) { 15697 break 15698 } 15699 v.reset(OpAMD64SUBL) 15700 v.AddArg2(y, x) 15701 return true 15702 } 15703 // match: (NEGL (MOVLconst [c])) 15704 // result: (MOVLconst [-c]) 15705 for { 15706 if v_0.Op != OpAMD64MOVLconst { 15707 break 15708 } 15709 c := auxIntToInt32(v_0.AuxInt) 15710 v.reset(OpAMD64MOVLconst) 15711 v.AuxInt = int32ToAuxInt(-c) 15712 return true 15713 } 15714 return false 15715 } 15716 func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool { 15717 v_0 := v.Args[0] 15718 // match: (NEGQ (NEGQ x)) 15719 // result: x 15720 for { 15721 if v_0.Op != OpAMD64NEGQ { 15722 break 15723 } 15724 x := v_0.Args[0] 15725 v.copyOf(x) 15726 return true 15727 } 15728 // match: (NEGQ s:(SUBQ x y)) 15729 // cond: s.Uses == 1 15730 // result: (SUBQ y x) 15731 for { 15732 s := v_0 15733 if s.Op != OpAMD64SUBQ { 15734 break 15735 } 15736 y := s.Args[1] 15737 x := s.Args[0] 15738 if !(s.Uses == 1) { 15739 break 15740 } 15741 v.reset(OpAMD64SUBQ) 15742 v.AddArg2(y, x) 15743 return true 15744 } 15745 // match: (NEGQ (MOVQconst [c])) 15746 // result: (MOVQconst [-c]) 15747 for { 15748 if v_0.Op != OpAMD64MOVQconst { 15749 break 15750 } 15751 c := auxIntToInt64(v_0.AuxInt) 15752 v.reset(OpAMD64MOVQconst) 15753 v.AuxInt = int64ToAuxInt(-c) 15754 return true 15755 } 15756 // match: (NEGQ (ADDQconst [c] (NEGQ x))) 15757 // cond: c != -(1<<31) 15758 // result: (ADDQconst [-c] x) 15759 for { 15760 if v_0.Op != OpAMD64ADDQconst { 15761 break 15762 } 15763 c := auxIntToInt32(v_0.AuxInt) 15764 v_0_0 := v_0.Args[0] 15765 if v_0_0.Op != OpAMD64NEGQ { 15766 break 15767 } 15768 x := v_0_0.Args[0] 15769 if !(c != -(1 << 31)) { 15770 break 15771 } 15772 v.reset(OpAMD64ADDQconst) 15773 v.AuxInt = int32ToAuxInt(-c) 15774 v.AddArg(x) 15775 return true 15776 } 15777 return false 15778 } 15779 func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool { 15780 v_0 := v.Args[0] 15781 // match: (NOTL (MOVLconst [c])) 15782 // result: (MOVLconst [^c]) 15783 for { 15784 if v_0.Op != OpAMD64MOVLconst { 15785 break 15786 } 15787 c := auxIntToInt32(v_0.AuxInt) 15788 v.reset(OpAMD64MOVLconst) 15789 v.AuxInt = int32ToAuxInt(^c) 15790 return true 15791 } 15792 return false 15793 } 15794 func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool { 15795 v_0 := v.Args[0] 15796 // match: (NOTQ (MOVQconst [c])) 15797 // result: (MOVQconst [^c]) 15798 for { 15799 if v_0.Op != OpAMD64MOVQconst { 15800 break 15801 } 15802 c := auxIntToInt64(v_0.AuxInt) 15803 v.reset(OpAMD64MOVQconst) 15804 v.AuxInt = int64ToAuxInt(^c) 15805 return true 15806 } 15807 return false 15808 } 15809 func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { 15810 v_1 := v.Args[1] 15811 v_0 := v.Args[0] 15812 b := v.Block 15813 typ := &b.Func.Config.Types 15814 // match: (ORL (SHLL (MOVLconst [1]) y) x) 15815 // result: (BTSL x y) 15816 for { 15817 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 15818 if v_0.Op != 
OpAMD64SHLL { 15819 continue 15820 } 15821 y := v_0.Args[1] 15822 v_0_0 := v_0.Args[0] 15823 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { 15824 continue 15825 } 15826 x := v_1 15827 v.reset(OpAMD64BTSL) 15828 v.AddArg2(x, y) 15829 return true 15830 } 15831 break 15832 } 15833 // match: (ORL (MOVLconst [c]) x) 15834 // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 15835 // result: (BTSLconst [int8(log32(c))] x) 15836 for { 15837 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 15838 if v_0.Op != OpAMD64MOVLconst { 15839 continue 15840 } 15841 c := auxIntToInt32(v_0.AuxInt) 15842 x := v_1 15843 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) { 15844 continue 15845 } 15846 v.reset(OpAMD64BTSLconst) 15847 v.AuxInt = int8ToAuxInt(int8(log32(c))) 15848 v.AddArg(x) 15849 return true 15850 } 15851 break 15852 } 15853 // match: (ORL x (MOVLconst [c])) 15854 // result: (ORLconst [c] x) 15855 for { 15856 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 15857 x := v_0 15858 if v_1.Op != OpAMD64MOVLconst { 15859 continue 15860 } 15861 c := auxIntToInt32(v_1.AuxInt) 15862 v.reset(OpAMD64ORLconst) 15863 v.AuxInt = int32ToAuxInt(c) 15864 v.AddArg(x) 15865 return true 15866 } 15867 break 15868 } 15869 // match: (ORL x x) 15870 // result: x 15871 for { 15872 x := v_0 15873 if x != v_1 { 15874 break 15875 } 15876 v.copyOf(x) 15877 return true 15878 } 15879 // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) 15880 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 15881 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 15882 for { 15883 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 15884 x0 := v_0 15885 if x0.Op != OpAMD64MOVBload { 15886 continue 15887 } 15888 i0 := auxIntToInt32(x0.AuxInt) 15889 s := auxToSym(x0.Aux) 15890 mem := x0.Args[1] 15891 p := x0.Args[0] 15892 sh := v_1 15893 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { 15894 continue 15895 } 15896 x1 := sh.Args[0] 15897 if x1.Op != OpAMD64MOVBload { 15898 continue 15899 } 15900 i1 := auxIntToInt32(x1.AuxInt) 15901 if auxToSym(x1.Aux) != s { 15902 continue 15903 } 15904 _ = x1.Args[1] 15905 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 15906 continue 15907 } 15908 b = mergePoint(b, x0, x1) 15909 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 15910 v.copyOf(v0) 15911 v0.AuxInt = int32ToAuxInt(i0) 15912 v0.Aux = symToAux(s) 15913 v0.AddArg2(p, mem) 15914 return true 15915 } 15916 break 15917 } 15918 // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem))) 15919 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 15920 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem) 15921 for { 15922 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 15923 x0 := v_0 15924 if x0.Op != OpAMD64MOVBload { 15925 continue 15926 } 15927 i := auxIntToInt32(x0.AuxInt) 15928 s := auxToSym(x0.Aux) 15929 mem := x0.Args[1] 15930 p0 := x0.Args[0] 15931 sh := v_1 15932 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { 15933 continue 15934 } 15935 x1 := sh.Args[0] 15936 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 15937 continue 15938 } 15939 _ = x1.Args[1] 15940 
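// Editorial note (illustrative sketch, not part of the generated output):
// the surrounding ORL rules implement little-endian load combining. Two
// one-byte loads from adjacent addresses, the higher one shifted left by 8,
// hold exactly the bits of a single 16-bit load, so when both loads are
// single-use and clobberable the pair collapses:
//
//	(ORL (MOVBload [i0] {s} p mem) (SHLLconst [8] (MOVBload [i0+1] {s} p mem)))
//	  => (MOVWload [i0] {s} p mem)
//
// This variant matches the same shape but with two pointers p0/p1 that
// sequentialAddresses proves are one byte apart; the MOVWload/SHLLconst [16]
// rules below then repeat the trick to build a single 32-bit load.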
p1 := x1.Args[0] 15941 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 15942 continue 15943 } 15944 b = mergePoint(b, x0, x1) 15945 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 15946 v.copyOf(v0) 15947 v0.AuxInt = int32ToAuxInt(i) 15948 v0.Aux = symToAux(s) 15949 v0.AddArg2(p0, mem) 15950 return true 15951 } 15952 break 15953 } 15954 // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) 15955 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 15956 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 15957 for { 15958 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 15959 x0 := v_0 15960 if x0.Op != OpAMD64MOVWload { 15961 continue 15962 } 15963 i0 := auxIntToInt32(x0.AuxInt) 15964 s := auxToSym(x0.Aux) 15965 mem := x0.Args[1] 15966 p := x0.Args[0] 15967 sh := v_1 15968 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { 15969 continue 15970 } 15971 x1 := sh.Args[0] 15972 if x1.Op != OpAMD64MOVWload { 15973 continue 15974 } 15975 i1 := auxIntToInt32(x1.AuxInt) 15976 if auxToSym(x1.Aux) != s { 15977 continue 15978 } 15979 _ = x1.Args[1] 15980 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 15981 continue 15982 } 15983 b = mergePoint(b, x0, x1) 15984 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 15985 v.copyOf(v0) 15986 v0.AuxInt = int32ToAuxInt(i0) 15987 v0.Aux = symToAux(s) 15988 v0.AddArg2(p, mem) 15989 return true 15990 } 15991 break 15992 } 15993 // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem))) 15994 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 15995 // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem) 15996 for { 15997 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 15998 x0 := v_0 15999 if x0.Op != OpAMD64MOVWload { 16000 continue 16001 } 16002 i := auxIntToInt32(x0.AuxInt) 16003 s := auxToSym(x0.Aux) 16004 mem := x0.Args[1] 16005 p0 := x0.Args[0] 16006 sh := v_1 16007 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { 16008 continue 16009 } 16010 x1 := sh.Args[0] 16011 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 16012 continue 16013 } 16014 _ = x1.Args[1] 16015 p1 := x1.Args[0] 16016 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 16017 continue 16018 } 16019 b = mergePoint(b, x0, x1) 16020 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 16021 v.copyOf(v0) 16022 v0.AuxInt = int32ToAuxInt(i) 16023 v0.Aux = symToAux(s) 16024 v0.AddArg2(p0, mem) 16025 return true 16026 } 16027 break 16028 } 16029 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 16030 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 16031 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 16032 for { 16033 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = 
_i0+1, v_1, v_0 { 16034 s1 := v_0 16035 if s1.Op != OpAMD64SHLLconst { 16036 continue 16037 } 16038 j1 := auxIntToInt8(s1.AuxInt) 16039 x1 := s1.Args[0] 16040 if x1.Op != OpAMD64MOVBload { 16041 continue 16042 } 16043 i1 := auxIntToInt32(x1.AuxInt) 16044 s := auxToSym(x1.Aux) 16045 mem := x1.Args[1] 16046 p := x1.Args[0] 16047 or := v_1 16048 if or.Op != OpAMD64ORL { 16049 continue 16050 } 16051 _ = or.Args[1] 16052 or_0 := or.Args[0] 16053 or_1 := or.Args[1] 16054 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 16055 s0 := or_0 16056 if s0.Op != OpAMD64SHLLconst { 16057 continue 16058 } 16059 j0 := auxIntToInt8(s0.AuxInt) 16060 x0 := s0.Args[0] 16061 if x0.Op != OpAMD64MOVBload { 16062 continue 16063 } 16064 i0 := auxIntToInt32(x0.AuxInt) 16065 if auxToSym(x0.Aux) != s { 16066 continue 16067 } 16068 _ = x0.Args[1] 16069 if p != x0.Args[0] || mem != x0.Args[1] { 16070 continue 16071 } 16072 y := or_1 16073 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 16074 continue 16075 } 16076 b = mergePoint(b, x0, x1, y) 16077 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) 16078 v.copyOf(v0) 16079 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) 16080 v1.AuxInt = int8ToAuxInt(j0) 16081 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 16082 v2.AuxInt = int32ToAuxInt(i0) 16083 v2.Aux = symToAux(s) 16084 v2.AddArg2(p, mem) 16085 v1.AddArg(v2) 16086 v0.AddArg2(v1, y) 16087 return true 16088 } 16089 } 16090 break 16091 } 16092 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) y)) 16093 // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 16094 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y) 16095 for { 16096 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16097 s1 := v_0 16098 if s1.Op != OpAMD64SHLLconst { 16099 continue 16100 } 16101 j1 := auxIntToInt8(s1.AuxInt) 16102 x1 := s1.Args[0] 16103 if x1.Op != OpAMD64MOVBload { 16104 continue 16105 } 16106 i := auxIntToInt32(x1.AuxInt) 16107 s := auxToSym(x1.Aux) 16108 mem := x1.Args[1] 16109 p1 := x1.Args[0] 16110 or := v_1 16111 if or.Op != OpAMD64ORL { 16112 continue 16113 } 16114 _ = or.Args[1] 16115 or_0 := or.Args[0] 16116 or_1 := or.Args[1] 16117 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 16118 s0 := or_0 16119 if s0.Op != OpAMD64SHLLconst { 16120 continue 16121 } 16122 j0 := auxIntToInt8(s0.AuxInt) 16123 x0 := s0.Args[0] 16124 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { 16125 continue 16126 } 16127 _ = x0.Args[1] 16128 p0 := x0.Args[0] 16129 if mem != x0.Args[1] { 16130 continue 16131 } 16132 y := or_1 16133 if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 16134 continue 16135 } 16136 b = mergePoint(b, x0, x1, y) 16137 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) 16138 v.copyOf(v0) 16139 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) 16140 v1.AuxInt = int8ToAuxInt(j0) 16141 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 16142 v2.AuxInt = int32ToAuxInt(i) 16143 
v2.Aux = symToAux(s) 16144 v2.AddArg2(p0, mem) 16145 v1.AddArg(v2) 16146 v0.AddArg2(v1, y) 16147 return true 16148 } 16149 } 16150 break 16151 } 16152 // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) 16153 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 16154 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 16155 for { 16156 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16157 x1 := v_0 16158 if x1.Op != OpAMD64MOVBload { 16159 continue 16160 } 16161 i1 := auxIntToInt32(x1.AuxInt) 16162 s := auxToSym(x1.Aux) 16163 mem := x1.Args[1] 16164 p := x1.Args[0] 16165 sh := v_1 16166 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { 16167 continue 16168 } 16169 x0 := sh.Args[0] 16170 if x0.Op != OpAMD64MOVBload { 16171 continue 16172 } 16173 i0 := auxIntToInt32(x0.AuxInt) 16174 if auxToSym(x0.Aux) != s { 16175 continue 16176 } 16177 _ = x0.Args[1] 16178 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 16179 continue 16180 } 16181 b = mergePoint(b, x0, x1) 16182 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) 16183 v.copyOf(v0) 16184 v0.AuxInt = int8ToAuxInt(8) 16185 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 16186 v1.AuxInt = int32ToAuxInt(i0) 16187 v1.Aux = symToAux(s) 16188 v1.AddArg2(p, mem) 16189 v0.AddArg(v1) 16190 return true 16191 } 16192 break 16193 } 16194 // match: (ORL x1:(MOVBload [i] {s} p1 mem) sh:(SHLLconst [8] x0:(MOVBload [i] {s} p0 mem))) 16195 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 16196 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem)) 16197 for { 16198 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16199 x1 := v_0 16200 if x1.Op != OpAMD64MOVBload { 16201 continue 16202 } 16203 i := auxIntToInt32(x1.AuxInt) 16204 s := auxToSym(x1.Aux) 16205 mem := x1.Args[1] 16206 p1 := x1.Args[0] 16207 sh := v_1 16208 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { 16209 continue 16210 } 16211 x0 := sh.Args[0] 16212 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { 16213 continue 16214 } 16215 _ = x0.Args[1] 16216 p0 := x0.Args[0] 16217 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 16218 continue 16219 } 16220 b = mergePoint(b, x0, x1) 16221 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) 16222 v.copyOf(v0) 16223 v0.AuxInt = int8ToAuxInt(8) 16224 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 16225 v1.AuxInt = int32ToAuxInt(i) 16226 v1.Aux = symToAux(s) 16227 v1.AddArg2(p0, mem) 16228 v0.AddArg(v1) 16229 return true 16230 } 16231 break 16232 } 16233 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 16234 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) 16235 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 16236 for { 16237 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16238 r1 := v_0 16239 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) 
!= 8 { 16240 continue 16241 } 16242 x1 := r1.Args[0] 16243 if x1.Op != OpAMD64MOVWload { 16244 continue 16245 } 16246 i1 := auxIntToInt32(x1.AuxInt) 16247 s := auxToSym(x1.Aux) 16248 mem := x1.Args[1] 16249 p := x1.Args[0] 16250 sh := v_1 16251 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { 16252 continue 16253 } 16254 r0 := sh.Args[0] 16255 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { 16256 continue 16257 } 16258 x0 := r0.Args[0] 16259 if x0.Op != OpAMD64MOVWload { 16260 continue 16261 } 16262 i0 := auxIntToInt32(x0.AuxInt) 16263 if auxToSym(x0.Aux) != s { 16264 continue 16265 } 16266 _ = x0.Args[1] 16267 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { 16268 continue 16269 } 16270 b = mergePoint(b, x0, x1) 16271 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) 16272 v.copyOf(v0) 16273 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 16274 v1.AuxInt = int32ToAuxInt(i0) 16275 v1.Aux = symToAux(s) 16276 v1.AddArg2(p, mem) 16277 v0.AddArg(v1) 16278 return true 16279 } 16280 break 16281 } 16282 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem)))) 16283 // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) 16284 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem)) 16285 for { 16286 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16287 r1 := v_0 16288 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { 16289 continue 16290 } 16291 x1 := r1.Args[0] 16292 if x1.Op != OpAMD64MOVWload { 16293 continue 16294 } 16295 i := auxIntToInt32(x1.AuxInt) 16296 s := auxToSym(x1.Aux) 16297 mem := x1.Args[1] 16298 p1 := x1.Args[0] 16299 sh := v_1 16300 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { 16301 continue 16302 } 16303 r0 := sh.Args[0] 16304 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { 16305 continue 16306 } 16307 x0 := r0.Args[0] 16308 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { 16309 continue 16310 } 16311 _ = x0.Args[1] 16312 p0 := x0.Args[0] 16313 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { 16314 continue 16315 } 16316 b = mergePoint(b, x0, x1) 16317 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) 16318 v.copyOf(v0) 16319 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 16320 v1.AuxInt = int32ToAuxInt(i) 16321 v1.Aux = symToAux(s) 16322 v1.AddArg2(p0, mem) 16323 v0.AddArg(v1) 16324 return true 16325 } 16326 break 16327 } 16328 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 16329 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 16330 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 16331 for { 16332 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16333 s0 := v_0 16334 if s0.Op != OpAMD64SHLLconst { 16335 continue 
16336 } 16337 j0 := auxIntToInt8(s0.AuxInt) 16338 x0 := s0.Args[0] 16339 if x0.Op != OpAMD64MOVBload { 16340 continue 16341 } 16342 i0 := auxIntToInt32(x0.AuxInt) 16343 s := auxToSym(x0.Aux) 16344 mem := x0.Args[1] 16345 p := x0.Args[0] 16346 or := v_1 16347 if or.Op != OpAMD64ORL { 16348 continue 16349 } 16350 _ = or.Args[1] 16351 or_0 := or.Args[0] 16352 or_1 := or.Args[1] 16353 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 16354 s1 := or_0 16355 if s1.Op != OpAMD64SHLLconst { 16356 continue 16357 } 16358 j1 := auxIntToInt8(s1.AuxInt) 16359 x1 := s1.Args[0] 16360 if x1.Op != OpAMD64MOVBload { 16361 continue 16362 } 16363 i1 := auxIntToInt32(x1.AuxInt) 16364 if auxToSym(x1.Aux) != s { 16365 continue 16366 } 16367 _ = x1.Args[1] 16368 if p != x1.Args[0] || mem != x1.Args[1] { 16369 continue 16370 } 16371 y := or_1 16372 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 16373 continue 16374 } 16375 b = mergePoint(b, x0, x1, y) 16376 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) 16377 v.copyOf(v0) 16378 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) 16379 v1.AuxInt = int8ToAuxInt(j1) 16380 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) 16381 v2.AuxInt = int8ToAuxInt(8) 16382 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 16383 v3.AuxInt = int32ToAuxInt(i0) 16384 v3.Aux = symToAux(s) 16385 v3.AddArg2(p, mem) 16386 v2.AddArg(v3) 16387 v1.AddArg(v2) 16388 v0.AddArg2(v1, y) 16389 return true 16390 } 16391 } 16392 break 16393 } 16394 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) y)) 16395 // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 16396 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y) 16397 for { 16398 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16399 s0 := v_0 16400 if s0.Op != OpAMD64SHLLconst { 16401 continue 16402 } 16403 j0 := auxIntToInt8(s0.AuxInt) 16404 x0 := s0.Args[0] 16405 if x0.Op != OpAMD64MOVBload { 16406 continue 16407 } 16408 i := auxIntToInt32(x0.AuxInt) 16409 s := auxToSym(x0.Aux) 16410 mem := x0.Args[1] 16411 p0 := x0.Args[0] 16412 or := v_1 16413 if or.Op != OpAMD64ORL { 16414 continue 16415 } 16416 _ = or.Args[1] 16417 or_0 := or.Args[0] 16418 or_1 := or.Args[1] 16419 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 16420 s1 := or_0 16421 if s1.Op != OpAMD64SHLLconst { 16422 continue 16423 } 16424 j1 := auxIntToInt8(s1.AuxInt) 16425 x1 := s1.Args[0] 16426 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 16427 continue 16428 } 16429 _ = x1.Args[1] 16430 p1 := x1.Args[0] 16431 if mem != x1.Args[1] { 16432 continue 16433 } 16434 y := or_1 16435 if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 16436 continue 16437 } 16438 b = mergePoint(b, x0, x1, y) 16439 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) 16440 v.copyOf(v0) 16441 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) 16442 v1.AuxInt = int8ToAuxInt(j1) 16443 v2 := b.NewValue0(x1.Pos, 
OpAMD64ROLWconst, typ.UInt16) 16444 v2.AuxInt = int8ToAuxInt(8) 16445 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 16446 v3.AuxInt = int32ToAuxInt(i) 16447 v3.Aux = symToAux(s) 16448 v3.AddArg2(p0, mem) 16449 v2.AddArg(v3) 16450 v1.AddArg(v2) 16451 v0.AddArg2(v1, y) 16452 return true 16453 } 16454 } 16455 break 16456 } 16457 // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) 16458 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 16459 // result: (ORLload x [off] {sym} ptr mem) 16460 for { 16461 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16462 x := v_0 16463 l := v_1 16464 if l.Op != OpAMD64MOVLload { 16465 continue 16466 } 16467 off := auxIntToInt32(l.AuxInt) 16468 sym := auxToSym(l.Aux) 16469 mem := l.Args[1] 16470 ptr := l.Args[0] 16471 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 16472 continue 16473 } 16474 v.reset(OpAMD64ORLload) 16475 v.AuxInt = int32ToAuxInt(off) 16476 v.Aux = symToAux(sym) 16477 v.AddArg3(x, ptr, mem) 16478 return true 16479 } 16480 break 16481 } 16482 return false 16483 } 16484 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { 16485 v_0 := v.Args[0] 16486 // match: (ORLconst [c] x) 16487 // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128 16488 // result: (BTSLconst [int8(log32(c))] x) 16489 for { 16490 c := auxIntToInt32(v.AuxInt) 16491 x := v_0 16492 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) { 16493 break 16494 } 16495 v.reset(OpAMD64BTSLconst) 16496 v.AuxInt = int8ToAuxInt(int8(log32(c))) 16497 v.AddArg(x) 16498 return true 16499 } 16500 // match: (ORLconst [c] (ORLconst [d] x)) 16501 // result: (ORLconst [c | d] x) 16502 for { 16503 c := auxIntToInt32(v.AuxInt) 16504 if v_0.Op != OpAMD64ORLconst { 16505 break 16506 } 16507 d := auxIntToInt32(v_0.AuxInt) 16508 x := v_0.Args[0] 16509 v.reset(OpAMD64ORLconst) 16510 v.AuxInt = int32ToAuxInt(c | d) 16511 v.AddArg(x) 16512 return true 16513 } 16514 // match: (ORLconst [c] (BTSLconst [d] x)) 16515 // result: (ORLconst [c | 1<<uint32(d)] x) 16516 for { 16517 c := auxIntToInt32(v.AuxInt) 16518 if v_0.Op != OpAMD64BTSLconst { 16519 break 16520 } 16521 d := auxIntToInt8(v_0.AuxInt) 16522 x := v_0.Args[0] 16523 v.reset(OpAMD64ORLconst) 16524 v.AuxInt = int32ToAuxInt(c | 1<<uint32(d)) 16525 v.AddArg(x) 16526 return true 16527 } 16528 // match: (ORLconst [c] x) 16529 // cond: c==0 16530 // result: x 16531 for { 16532 c := auxIntToInt32(v.AuxInt) 16533 x := v_0 16534 if !(c == 0) { 16535 break 16536 } 16537 v.copyOf(x) 16538 return true 16539 } 16540 // match: (ORLconst [c] _) 16541 // cond: c==-1 16542 // result: (MOVLconst [-1]) 16543 for { 16544 c := auxIntToInt32(v.AuxInt) 16545 if !(c == -1) { 16546 break 16547 } 16548 v.reset(OpAMD64MOVLconst) 16549 v.AuxInt = int32ToAuxInt(-1) 16550 return true 16551 } 16552 // match: (ORLconst [c] (MOVLconst [d])) 16553 // result: (MOVLconst [c|d]) 16554 for { 16555 c := auxIntToInt32(v.AuxInt) 16556 if v_0.Op != OpAMD64MOVLconst { 16557 break 16558 } 16559 d := auxIntToInt32(v_0.AuxInt) 16560 v.reset(OpAMD64MOVLconst) 16561 v.AuxInt = int32ToAuxInt(c | d) 16562 return true 16563 } 16564 return false 16565 } 16566 func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool { 16567 v_1 := v.Args[1] 16568 v_0 := v.Args[0] 16569 // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 16570 // cond: ValAndOff(valoff1).canAdd32(off2) 16571 // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) 16572 for { 16573 valoff1 := auxIntToValAndOff(v.AuxInt) 16574 sym := auxToSym(v.Aux) 16575 if 
v_0.Op != OpAMD64ADDQconst { 16576 break 16577 } 16578 off2 := auxIntToInt32(v_0.AuxInt) 16579 base := v_0.Args[0] 16580 mem := v_1 16581 if !(ValAndOff(valoff1).canAdd32(off2)) { 16582 break 16583 } 16584 v.reset(OpAMD64ORLconstmodify) 16585 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 16586 v.Aux = symToAux(sym) 16587 v.AddArg2(base, mem) 16588 return true 16589 } 16590 // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 16591 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) 16592 // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) 16593 for { 16594 valoff1 := auxIntToValAndOff(v.AuxInt) 16595 sym1 := auxToSym(v.Aux) 16596 if v_0.Op != OpAMD64LEAQ { 16597 break 16598 } 16599 off2 := auxIntToInt32(v_0.AuxInt) 16600 sym2 := auxToSym(v_0.Aux) 16601 base := v_0.Args[0] 16602 mem := v_1 16603 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { 16604 break 16605 } 16606 v.reset(OpAMD64ORLconstmodify) 16607 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 16608 v.Aux = symToAux(mergeSym(sym1, sym2)) 16609 v.AddArg2(base, mem) 16610 return true 16611 } 16612 return false 16613 } 16614 func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { 16615 v_2 := v.Args[2] 16616 v_1 := v.Args[1] 16617 v_0 := v.Args[0] 16618 b := v.Block 16619 typ := &b.Func.Config.Types 16620 // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem) 16621 // cond: is32Bit(int64(off1)+int64(off2)) 16622 // result: (ORLload [off1+off2] {sym} val base mem) 16623 for { 16624 off1 := auxIntToInt32(v.AuxInt) 16625 sym := auxToSym(v.Aux) 16626 val := v_0 16627 if v_1.Op != OpAMD64ADDQconst { 16628 break 16629 } 16630 off2 := auxIntToInt32(v_1.AuxInt) 16631 base := v_1.Args[0] 16632 mem := v_2 16633 if !(is32Bit(int64(off1) + int64(off2))) { 16634 break 16635 } 16636 v.reset(OpAMD64ORLload) 16637 v.AuxInt = int32ToAuxInt(off1 + off2) 16638 v.Aux = symToAux(sym) 16639 v.AddArg3(val, base, mem) 16640 return true 16641 } 16642 // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 16643 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 16644 // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 16645 for { 16646 off1 := auxIntToInt32(v.AuxInt) 16647 sym1 := auxToSym(v.Aux) 16648 val := v_0 16649 if v_1.Op != OpAMD64LEAQ { 16650 break 16651 } 16652 off2 := auxIntToInt32(v_1.AuxInt) 16653 sym2 := auxToSym(v_1.Aux) 16654 base := v_1.Args[0] 16655 mem := v_2 16656 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 16657 break 16658 } 16659 v.reset(OpAMD64ORLload) 16660 v.AuxInt = int32ToAuxInt(off1 + off2) 16661 v.Aux = symToAux(mergeSym(sym1, sym2)) 16662 v.AddArg3(val, base, mem) 16663 return true 16664 } 16665 // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 16666 // result: ( ORL x (MOVLf2i y)) 16667 for { 16668 off := auxIntToInt32(v.AuxInt) 16669 sym := auxToSym(v.Aux) 16670 x := v_0 16671 ptr := v_1 16672 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { 16673 break 16674 } 16675 y := v_2.Args[1] 16676 if ptr != v_2.Args[0] { 16677 break 16678 } 16679 v.reset(OpAMD64ORL) 16680 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) 16681 v0.AddArg(y) 16682 v.AddArg2(x, v0) 16683 return true 16684 } 16685 return false 16686 } 16687 func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { 16688 v_2 := v.Args[2] 16689 v_1 := v.Args[1] 16690 v_0 := 
v.Args[0] 16691 // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem) 16692 // cond: is32Bit(int64(off1)+int64(off2)) 16693 // result: (ORLmodify [off1+off2] {sym} base val mem) 16694 for { 16695 off1 := auxIntToInt32(v.AuxInt) 16696 sym := auxToSym(v.Aux) 16697 if v_0.Op != OpAMD64ADDQconst { 16698 break 16699 } 16700 off2 := auxIntToInt32(v_0.AuxInt) 16701 base := v_0.Args[0] 16702 val := v_1 16703 mem := v_2 16704 if !(is32Bit(int64(off1) + int64(off2))) { 16705 break 16706 } 16707 v.reset(OpAMD64ORLmodify) 16708 v.AuxInt = int32ToAuxInt(off1 + off2) 16709 v.Aux = symToAux(sym) 16710 v.AddArg3(base, val, mem) 16711 return true 16712 } 16713 // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 16714 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 16715 // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 16716 for { 16717 off1 := auxIntToInt32(v.AuxInt) 16718 sym1 := auxToSym(v.Aux) 16719 if v_0.Op != OpAMD64LEAQ { 16720 break 16721 } 16722 off2 := auxIntToInt32(v_0.AuxInt) 16723 sym2 := auxToSym(v_0.Aux) 16724 base := v_0.Args[0] 16725 val := v_1 16726 mem := v_2 16727 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 16728 break 16729 } 16730 v.reset(OpAMD64ORLmodify) 16731 v.AuxInt = int32ToAuxInt(off1 + off2) 16732 v.Aux = symToAux(mergeSym(sym1, sym2)) 16733 v.AddArg3(base, val, mem) 16734 return true 16735 } 16736 return false 16737 } 16738 func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { 16739 v_1 := v.Args[1] 16740 v_0 := v.Args[0] 16741 b := v.Block 16742 typ := &b.Func.Config.Types 16743 // match: (ORQ (SHLQ (MOVQconst [1]) y) x) 16744 // result: (BTSQ x y) 16745 for { 16746 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16747 if v_0.Op != OpAMD64SHLQ { 16748 continue 16749 } 16750 y := v_0.Args[1] 16751 v_0_0 := v_0.Args[0] 16752 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { 16753 continue 16754 } 16755 x := v_1 16756 v.reset(OpAMD64BTSQ) 16757 v.AddArg2(x, y) 16758 return true 16759 } 16760 break 16761 } 16762 // match: (ORQ (MOVQconst [c]) x) 16763 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 16764 // result: (BTSQconst [int8(log64(c))] x) 16765 for { 16766 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16767 if v_0.Op != OpAMD64MOVQconst { 16768 continue 16769 } 16770 c := auxIntToInt64(v_0.AuxInt) 16771 x := v_1 16772 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { 16773 continue 16774 } 16775 v.reset(OpAMD64BTSQconst) 16776 v.AuxInt = int8ToAuxInt(int8(log64(c))) 16777 v.AddArg(x) 16778 return true 16779 } 16780 break 16781 } 16782 // match: (ORQ x (MOVQconst [c])) 16783 // cond: is32Bit(c) 16784 // result: (ORQconst [int32(c)] x) 16785 for { 16786 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16787 x := v_0 16788 if v_1.Op != OpAMD64MOVQconst { 16789 continue 16790 } 16791 c := auxIntToInt64(v_1.AuxInt) 16792 if !(is32Bit(c)) { 16793 continue 16794 } 16795 v.reset(OpAMD64ORQconst) 16796 v.AuxInt = int32ToAuxInt(int32(c)) 16797 v.AddArg(x) 16798 return true 16799 } 16800 break 16801 } 16802 // match: (ORQ x (MOVLconst [c])) 16803 // result: (ORQconst [c] x) 16804 for { 16805 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16806 x := v_0 16807 if v_1.Op != OpAMD64MOVLconst { 16808 continue 16809 } 16810 c := auxIntToInt32(v_1.AuxInt) 16811 v.reset(OpAMD64ORQconst) 16812 v.AuxInt = int32ToAuxInt(c) 16813 v.AddArg(x) 16814 return true 16815 } 16816 break 16817 } 16818 // match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ 
bits))) 16819 // result: (SHRDQ lo hi bits) 16820 for { 16821 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16822 if v_0.Op != OpAMD64SHRQ { 16823 continue 16824 } 16825 bits := v_0.Args[1] 16826 lo := v_0.Args[0] 16827 if v_1.Op != OpAMD64SHLQ { 16828 continue 16829 } 16830 _ = v_1.Args[1] 16831 hi := v_1.Args[0] 16832 v_1_1 := v_1.Args[1] 16833 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { 16834 continue 16835 } 16836 v.reset(OpAMD64SHRDQ) 16837 v.AddArg3(lo, hi, bits) 16838 return true 16839 } 16840 break 16841 } 16842 // match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits))) 16843 // result: (SHLDQ lo hi bits) 16844 for { 16845 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16846 if v_0.Op != OpAMD64SHLQ { 16847 continue 16848 } 16849 bits := v_0.Args[1] 16850 lo := v_0.Args[0] 16851 if v_1.Op != OpAMD64SHRQ { 16852 continue 16853 } 16854 _ = v_1.Args[1] 16855 hi := v_1.Args[0] 16856 v_1_1 := v_1.Args[1] 16857 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { 16858 continue 16859 } 16860 v.reset(OpAMD64SHLDQ) 16861 v.AddArg3(lo, hi, bits) 16862 return true 16863 } 16864 break 16865 } 16866 // match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits))) 16867 // result: (SHRDQ lo hi bits) 16868 for { 16869 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16870 if v_0.Op != OpAMD64SHRXQ { 16871 continue 16872 } 16873 bits := v_0.Args[1] 16874 lo := v_0.Args[0] 16875 if v_1.Op != OpAMD64SHLXQ { 16876 continue 16877 } 16878 _ = v_1.Args[1] 16879 hi := v_1.Args[0] 16880 v_1_1 := v_1.Args[1] 16881 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { 16882 continue 16883 } 16884 v.reset(OpAMD64SHRDQ) 16885 v.AddArg3(lo, hi, bits) 16886 return true 16887 } 16888 break 16889 } 16890 // match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits))) 16891 // result: (SHLDQ lo hi bits) 16892 for { 16893 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16894 if v_0.Op != OpAMD64SHLXQ { 16895 continue 16896 } 16897 bits := v_0.Args[1] 16898 lo := v_0.Args[0] 16899 if v_1.Op != OpAMD64SHRXQ { 16900 continue 16901 } 16902 _ = v_1.Args[1] 16903 hi := v_1.Args[0] 16904 v_1_1 := v_1.Args[1] 16905 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { 16906 continue 16907 } 16908 v.reset(OpAMD64SHLDQ) 16909 v.AddArg3(lo, hi, bits) 16910 return true 16911 } 16912 break 16913 } 16914 // match: (ORQ (MOVQconst [c]) (MOVQconst [d])) 16915 // result: (MOVQconst [c|d]) 16916 for { 16917 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16918 if v_0.Op != OpAMD64MOVQconst { 16919 continue 16920 } 16921 c := auxIntToInt64(v_0.AuxInt) 16922 if v_1.Op != OpAMD64MOVQconst { 16923 continue 16924 } 16925 d := auxIntToInt64(v_1.AuxInt) 16926 v.reset(OpAMD64MOVQconst) 16927 v.AuxInt = int64ToAuxInt(c | d) 16928 return true 16929 } 16930 break 16931 } 16932 // match: (ORQ x x) 16933 // result: x 16934 for { 16935 x := v_0 16936 if x != v_1 { 16937 break 16938 } 16939 v.copyOf(x) 16940 return true 16941 } 16942 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) 16943 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 16944 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 16945 for { 16946 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16947 x0 := v_0 16948 if x0.Op != OpAMD64MOVBload { 16949 continue 16950 } 16951 i0 := auxIntToInt32(x0.AuxInt) 16952 s := auxToSym(x0.Aux) 16953 mem := x0.Args[1] 16954 p := x0.Args[0] 16955 sh := v_1 16956 if sh.Op != 
OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 { 16957 continue 16958 } 16959 x1 := sh.Args[0] 16960 if x1.Op != OpAMD64MOVBload { 16961 continue 16962 } 16963 i1 := auxIntToInt32(x1.AuxInt) 16964 if auxToSym(x1.Aux) != s { 16965 continue 16966 } 16967 _ = x1.Args[1] 16968 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 16969 continue 16970 } 16971 b = mergePoint(b, x0, x1) 16972 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 16973 v.copyOf(v0) 16974 v0.AuxInt = int32ToAuxInt(i0) 16975 v0.Aux = symToAux(s) 16976 v0.AddArg2(p, mem) 16977 return true 16978 } 16979 break 16980 } 16981 // match: (ORQ x0:(MOVBload [i] {s} p0 mem) sh:(SHLQconst [8] x1:(MOVBload [i] {s} p1 mem))) 16982 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 16983 // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem) 16984 for { 16985 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 16986 x0 := v_0 16987 if x0.Op != OpAMD64MOVBload { 16988 continue 16989 } 16990 i := auxIntToInt32(x0.AuxInt) 16991 s := auxToSym(x0.Aux) 16992 mem := x0.Args[1] 16993 p0 := x0.Args[0] 16994 sh := v_1 16995 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 { 16996 continue 16997 } 16998 x1 := sh.Args[0] 16999 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 17000 continue 17001 } 17002 _ = x1.Args[1] 17003 p1 := x1.Args[0] 17004 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 17005 continue 17006 } 17007 b = mergePoint(b, x0, x1) 17008 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 17009 v.copyOf(v0) 17010 v0.AuxInt = int32ToAuxInt(i) 17011 v0.Aux = symToAux(s) 17012 v0.AddArg2(p0, mem) 17013 return true 17014 } 17015 break 17016 } 17017 // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) 17018 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 17019 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 17020 for { 17021 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17022 x0 := v_0 17023 if x0.Op != OpAMD64MOVWload { 17024 continue 17025 } 17026 i0 := auxIntToInt32(x0.AuxInt) 17027 s := auxToSym(x0.Aux) 17028 mem := x0.Args[1] 17029 p := x0.Args[0] 17030 sh := v_1 17031 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 { 17032 continue 17033 } 17034 x1 := sh.Args[0] 17035 if x1.Op != OpAMD64MOVWload { 17036 continue 17037 } 17038 i1 := auxIntToInt32(x1.AuxInt) 17039 if auxToSym(x1.Aux) != s { 17040 continue 17041 } 17042 _ = x1.Args[1] 17043 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 17044 continue 17045 } 17046 b = mergePoint(b, x0, x1) 17047 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 17048 v.copyOf(v0) 17049 v0.AuxInt = int32ToAuxInt(i0) 17050 v0.Aux = symToAux(s) 17051 v0.AddArg2(p, mem) 17052 return true 17053 } 17054 break 17055 } 17056 // match: (ORQ x0:(MOVWload [i] {s} p0 mem) sh:(SHLQconst [16] x1:(MOVWload [i] {s} p1 mem))) 17057 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 
17058 // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem) 17059 for { 17060 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17061 x0 := v_0 17062 if x0.Op != OpAMD64MOVWload { 17063 continue 17064 } 17065 i := auxIntToInt32(x0.AuxInt) 17066 s := auxToSym(x0.Aux) 17067 mem := x0.Args[1] 17068 p0 := x0.Args[0] 17069 sh := v_1 17070 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 { 17071 continue 17072 } 17073 x1 := sh.Args[0] 17074 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 17075 continue 17076 } 17077 _ = x1.Args[1] 17078 p1 := x1.Args[0] 17079 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 17080 continue 17081 } 17082 b = mergePoint(b, x0, x1) 17083 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 17084 v.copyOf(v0) 17085 v0.AuxInt = int32ToAuxInt(i) 17086 v0.Aux = symToAux(s) 17087 v0.AddArg2(p0, mem) 17088 return true 17089 } 17090 break 17091 } 17092 // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) 17093 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 17094 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 17095 for { 17096 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17097 x0 := v_0 17098 if x0.Op != OpAMD64MOVLload { 17099 continue 17100 } 17101 i0 := auxIntToInt32(x0.AuxInt) 17102 s := auxToSym(x0.Aux) 17103 mem := x0.Args[1] 17104 p := x0.Args[0] 17105 sh := v_1 17106 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { 17107 continue 17108 } 17109 x1 := sh.Args[0] 17110 if x1.Op != OpAMD64MOVLload { 17111 continue 17112 } 17113 i1 := auxIntToInt32(x1.AuxInt) 17114 if auxToSym(x1.Aux) != s { 17115 continue 17116 } 17117 _ = x1.Args[1] 17118 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 17119 continue 17120 } 17121 b = mergePoint(b, x0, x1) 17122 v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) 17123 v.copyOf(v0) 17124 v0.AuxInt = int32ToAuxInt(i0) 17125 v0.Aux = symToAux(s) 17126 v0.AddArg2(p, mem) 17127 return true 17128 } 17129 break 17130 } 17131 // match: (ORQ x0:(MOVLload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem))) 17132 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 17133 // result: @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem) 17134 for { 17135 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17136 x0 := v_0 17137 if x0.Op != OpAMD64MOVLload { 17138 continue 17139 } 17140 i := auxIntToInt32(x0.AuxInt) 17141 s := auxToSym(x0.Aux) 17142 mem := x0.Args[1] 17143 p0 := x0.Args[0] 17144 sh := v_1 17145 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { 17146 continue 17147 } 17148 x1 := sh.Args[0] 17149 if x1.Op != OpAMD64MOVLload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 17150 continue 17151 } 17152 _ = x1.Args[1] 17153 p1 := x1.Args[0] 17154 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 17155 continue 17156 } 17157 b = mergePoint(b, x0, x1) 17158 v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) 17159 v.copyOf(v0) 17160 v0.AuxInt = 
int32ToAuxInt(i) 17161 v0.Aux = symToAux(s) 17162 v0.AddArg2(p0, mem) 17163 return true 17164 } 17165 break 17166 } 17167 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 17168 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 17169 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 17170 for { 17171 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17172 s1 := v_0 17173 if s1.Op != OpAMD64SHLQconst { 17174 continue 17175 } 17176 j1 := auxIntToInt8(s1.AuxInt) 17177 x1 := s1.Args[0] 17178 if x1.Op != OpAMD64MOVBload { 17179 continue 17180 } 17181 i1 := auxIntToInt32(x1.AuxInt) 17182 s := auxToSym(x1.Aux) 17183 mem := x1.Args[1] 17184 p := x1.Args[0] 17185 or := v_1 17186 if or.Op != OpAMD64ORQ { 17187 continue 17188 } 17189 _ = or.Args[1] 17190 or_0 := or.Args[0] 17191 or_1 := or.Args[1] 17192 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 17193 s0 := or_0 17194 if s0.Op != OpAMD64SHLQconst { 17195 continue 17196 } 17197 j0 := auxIntToInt8(s0.AuxInt) 17198 x0 := s0.Args[0] 17199 if x0.Op != OpAMD64MOVBload { 17200 continue 17201 } 17202 i0 := auxIntToInt32(x0.AuxInt) 17203 if auxToSym(x0.Aux) != s { 17204 continue 17205 } 17206 _ = x0.Args[1] 17207 if p != x0.Args[0] || mem != x0.Args[1] { 17208 continue 17209 } 17210 y := or_1 17211 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 17212 continue 17213 } 17214 b = mergePoint(b, x0, x1, y) 17215 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 17216 v.copyOf(v0) 17217 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 17218 v1.AuxInt = int8ToAuxInt(j0) 17219 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 17220 v2.AuxInt = int32ToAuxInt(i0) 17221 v2.Aux = symToAux(s) 17222 v2.AddArg2(p, mem) 17223 v1.AddArg(v2) 17224 v0.AddArg2(v1, y) 17225 return true 17226 } 17227 } 17228 break 17229 } 17230 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) y)) 17231 // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 17232 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y) 17233 for { 17234 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17235 s1 := v_0 17236 if s1.Op != OpAMD64SHLQconst { 17237 continue 17238 } 17239 j1 := auxIntToInt8(s1.AuxInt) 17240 x1 := s1.Args[0] 17241 if x1.Op != OpAMD64MOVBload { 17242 continue 17243 } 17244 i := auxIntToInt32(x1.AuxInt) 17245 s := auxToSym(x1.Aux) 17246 mem := x1.Args[1] 17247 p1 := x1.Args[0] 17248 or := v_1 17249 if or.Op != OpAMD64ORQ { 17250 continue 17251 } 17252 _ = or.Args[1] 17253 or_0 := or.Args[0] 17254 or_1 := or.Args[1] 17255 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 17256 s0 := or_0 17257 if s0.Op != OpAMD64SHLQconst { 17258 continue 17259 } 17260 j0 := auxIntToInt8(s0.AuxInt) 17261 x0 := s0.Args[0] 17262 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { 17263 continue 17264 } 17265 _ = x0.Args[1] 17266 p0 := x0.Args[0] 
17267 if mem != x0.Args[1] { 17268 continue 17269 } 17270 y := or_1 17271 if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 17272 continue 17273 } 17274 b = mergePoint(b, x0, x1, y) 17275 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 17276 v.copyOf(v0) 17277 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 17278 v1.AuxInt = int8ToAuxInt(j0) 17279 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 17280 v2.AuxInt = int32ToAuxInt(i) 17281 v2.Aux = symToAux(s) 17282 v2.AddArg2(p0, mem) 17283 v1.AddArg(v2) 17284 v0.AddArg2(v1, y) 17285 return true 17286 } 17287 } 17288 break 17289 } 17290 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y)) 17291 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 17292 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 17293 for { 17294 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17295 s1 := v_0 17296 if s1.Op != OpAMD64SHLQconst { 17297 continue 17298 } 17299 j1 := auxIntToInt8(s1.AuxInt) 17300 x1 := s1.Args[0] 17301 if x1.Op != OpAMD64MOVWload { 17302 continue 17303 } 17304 i1 := auxIntToInt32(x1.AuxInt) 17305 s := auxToSym(x1.Aux) 17306 mem := x1.Args[1] 17307 p := x1.Args[0] 17308 or := v_1 17309 if or.Op != OpAMD64ORQ { 17310 continue 17311 } 17312 _ = or.Args[1] 17313 or_0 := or.Args[0] 17314 or_1 := or.Args[1] 17315 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 17316 s0 := or_0 17317 if s0.Op != OpAMD64SHLQconst { 17318 continue 17319 } 17320 j0 := auxIntToInt8(s0.AuxInt) 17321 x0 := s0.Args[0] 17322 if x0.Op != OpAMD64MOVWload { 17323 continue 17324 } 17325 i0 := auxIntToInt32(x0.AuxInt) 17326 if auxToSym(x0.Aux) != s { 17327 continue 17328 } 17329 _ = x0.Args[1] 17330 if p != x0.Args[0] || mem != x0.Args[1] { 17331 continue 17332 } 17333 y := or_1 17334 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 17335 continue 17336 } 17337 b = mergePoint(b, x0, x1, y) 17338 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 17339 v.copyOf(v0) 17340 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 17341 v1.AuxInt = int8ToAuxInt(j0) 17342 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 17343 v2.AuxInt = int32ToAuxInt(i0) 17344 v2.Aux = symToAux(s) 17345 v2.AddArg2(p, mem) 17346 v1.AddArg(v2) 17347 v0.AddArg2(v1, y) 17348 return true 17349 } 17350 } 17351 break 17352 } 17353 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem)) y)) 17354 // cond: j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 17355 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y) 17356 for { 17357 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17358 s1 := v_0 17359 if s1.Op != OpAMD64SHLQconst { 17360 continue 17361 } 17362 j1 := auxIntToInt8(s1.AuxInt) 17363 x1 := s1.Args[0] 17364 if x1.Op != 
OpAMD64MOVWload { 17365 continue 17366 } 17367 i := auxIntToInt32(x1.AuxInt) 17368 s := auxToSym(x1.Aux) 17369 mem := x1.Args[1] 17370 p1 := x1.Args[0] 17371 or := v_1 17372 if or.Op != OpAMD64ORQ { 17373 continue 17374 } 17375 _ = or.Args[1] 17376 or_0 := or.Args[0] 17377 or_1 := or.Args[1] 17378 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 17379 s0 := or_0 17380 if s0.Op != OpAMD64SHLQconst { 17381 continue 17382 } 17383 j0 := auxIntToInt8(s0.AuxInt) 17384 x0 := s0.Args[0] 17385 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { 17386 continue 17387 } 17388 _ = x0.Args[1] 17389 p0 := x0.Args[0] 17390 if mem != x0.Args[1] { 17391 continue 17392 } 17393 y := or_1 17394 if !(j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 17395 continue 17396 } 17397 b = mergePoint(b, x0, x1, y) 17398 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 17399 v.copyOf(v0) 17400 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 17401 v1.AuxInt = int8ToAuxInt(j0) 17402 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 17403 v2.AuxInt = int32ToAuxInt(i) 17404 v2.Aux = symToAux(s) 17405 v2.AddArg2(p0, mem) 17406 v1.AddArg(v2) 17407 v0.AddArg2(v1, y) 17408 return true 17409 } 17410 } 17411 break 17412 } 17413 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 17414 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 17415 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 17416 for { 17417 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17418 x1 := v_0 17419 if x1.Op != OpAMD64MOVBload { 17420 continue 17421 } 17422 i1 := auxIntToInt32(x1.AuxInt) 17423 s := auxToSym(x1.Aux) 17424 mem := x1.Args[1] 17425 p := x1.Args[0] 17426 sh := v_1 17427 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 { 17428 continue 17429 } 17430 x0 := sh.Args[0] 17431 if x0.Op != OpAMD64MOVBload { 17432 continue 17433 } 17434 i0 := auxIntToInt32(x0.AuxInt) 17435 if auxToSym(x0.Aux) != s { 17436 continue 17437 } 17438 _ = x0.Args[1] 17439 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 17440 continue 17441 } 17442 b = mergePoint(b, x0, x1) 17443 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) 17444 v.copyOf(v0) 17445 v0.AuxInt = int8ToAuxInt(8) 17446 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 17447 v1.AuxInt = int32ToAuxInt(i0) 17448 v1.Aux = symToAux(s) 17449 v1.AddArg2(p, mem) 17450 v0.AddArg(v1) 17451 return true 17452 } 17453 break 17454 } 17455 // match: (ORQ x1:(MOVBload [i] {s} p1 mem) sh:(SHLQconst [8] x0:(MOVBload [i] {s} p0 mem))) 17456 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 17457 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem)) 17458 for { 17459 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17460 x1 := v_0 17461 if x1.Op != OpAMD64MOVBload { 17462 continue 17463 } 17464 i := auxIntToInt32(x1.AuxInt) 17465 s := auxToSym(x1.Aux) 17466 mem := x1.Args[1] 17467 p1 := x1.Args[0] 17468 sh := v_1 17469 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 { 17470 continue 17471 } 
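// Editorial note (illustrative sketch, not part of the generated output):
// in this family of rules the byte order is reversed relative to the plain
// merges earlier: the byte at the higher address ends up in the low half of
// the result, which is a byte-swapped 16-bit load. A 16-bit byte swap is a
// rotate by 8:
//
//	(ORQ (MOVBload [i0+1] {s} p mem) (SHLQconst [8] (MOVBload [i0] {s} p mem)))
//	  => (ROLWconst [8] (MOVWload [i0] {s} p mem))
//
// The rules that follow pair two such rotated halves into a BSWAPL of one
// 32-bit load, and two BSWAPL halves into a BSWAPQ of one 64-bit load.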
17472 x0 := sh.Args[0] 17473 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { 17474 continue 17475 } 17476 _ = x0.Args[1] 17477 p0 := x0.Args[0] 17478 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 17479 continue 17480 } 17481 b = mergePoint(b, x0, x1) 17482 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) 17483 v.copyOf(v0) 17484 v0.AuxInt = int8ToAuxInt(8) 17485 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 17486 v1.AuxInt = int32ToAuxInt(i) 17487 v1.Aux = symToAux(s) 17488 v1.AddArg2(p0, mem) 17489 v0.AddArg(v1) 17490 return true 17491 } 17492 break 17493 } 17494 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 17495 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) 17496 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 17497 for { 17498 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17499 r1 := v_0 17500 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { 17501 continue 17502 } 17503 x1 := r1.Args[0] 17504 if x1.Op != OpAMD64MOVWload { 17505 continue 17506 } 17507 i1 := auxIntToInt32(x1.AuxInt) 17508 s := auxToSym(x1.Aux) 17509 mem := x1.Args[1] 17510 p := x1.Args[0] 17511 sh := v_1 17512 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 { 17513 continue 17514 } 17515 r0 := sh.Args[0] 17516 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { 17517 continue 17518 } 17519 x0 := r0.Args[0] 17520 if x0.Op != OpAMD64MOVWload { 17521 continue 17522 } 17523 i0 := auxIntToInt32(x0.AuxInt) 17524 if auxToSym(x0.Aux) != s { 17525 continue 17526 } 17527 _ = x0.Args[1] 17528 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { 17529 continue 17530 } 17531 b = mergePoint(b, x0, x1) 17532 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) 17533 v.copyOf(v0) 17534 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 17535 v1.AuxInt = int32ToAuxInt(i0) 17536 v1.Aux = symToAux(s) 17537 v1.AddArg2(p, mem) 17538 v0.AddArg(v1) 17539 return true 17540 } 17541 break 17542 } 17543 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem)))) 17544 // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) 17545 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem)) 17546 for { 17547 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17548 r1 := v_0 17549 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { 17550 continue 17551 } 17552 x1 := r1.Args[0] 17553 if x1.Op != OpAMD64MOVWload { 17554 continue 17555 } 17556 i := auxIntToInt32(x1.AuxInt) 17557 s := auxToSym(x1.Aux) 17558 mem := x1.Args[1] 17559 p1 := x1.Args[0] 17560 sh := v_1 17561 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 { 17562 continue 17563 } 17564 r0 := sh.Args[0] 17565 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { 17566 continue 17567 } 17568 x0 := r0.Args[0] 17569 if x0.Op != OpAMD64MOVWload || 
auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { 17570 continue 17571 } 17572 _ = x0.Args[1] 17573 p0 := x0.Args[0] 17574 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { 17575 continue 17576 } 17577 b = mergePoint(b, x0, x1) 17578 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) 17579 v.copyOf(v0) 17580 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 17581 v1.AuxInt = int32ToAuxInt(i) 17582 v1.Aux = symToAux(s) 17583 v1.AddArg2(p0, mem) 17584 v0.AddArg(v1) 17585 return true 17586 } 17587 break 17588 } 17589 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 17590 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) 17591 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 17592 for { 17593 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17594 r1 := v_0 17595 if r1.Op != OpAMD64BSWAPL { 17596 continue 17597 } 17598 x1 := r1.Args[0] 17599 if x1.Op != OpAMD64MOVLload { 17600 continue 17601 } 17602 i1 := auxIntToInt32(x1.AuxInt) 17603 s := auxToSym(x1.Aux) 17604 mem := x1.Args[1] 17605 p := x1.Args[0] 17606 sh := v_1 17607 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { 17608 continue 17609 } 17610 r0 := sh.Args[0] 17611 if r0.Op != OpAMD64BSWAPL { 17612 continue 17613 } 17614 x0 := r0.Args[0] 17615 if x0.Op != OpAMD64MOVLload { 17616 continue 17617 } 17618 i0 := auxIntToInt32(x0.AuxInt) 17619 if auxToSym(x0.Aux) != s { 17620 continue 17621 } 17622 _ = x0.Args[1] 17623 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { 17624 continue 17625 } 17626 b = mergePoint(b, x0, x1) 17627 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) 17628 v.copyOf(v0) 17629 v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) 17630 v1.AuxInt = int32ToAuxInt(i0) 17631 v1.Aux = symToAux(s) 17632 v1.AddArg2(p, mem) 17633 v0.AddArg(v1) 17634 return true 17635 } 17636 break 17637 } 17638 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem)))) 17639 // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) 17640 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem)) 17641 for { 17642 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17643 r1 := v_0 17644 if r1.Op != OpAMD64BSWAPL { 17645 continue 17646 } 17647 x1 := r1.Args[0] 17648 if x1.Op != OpAMD64MOVLload { 17649 continue 17650 } 17651 i := auxIntToInt32(x1.AuxInt) 17652 s := auxToSym(x1.Aux) 17653 mem := x1.Args[1] 17654 p1 := x1.Args[0] 17655 sh := v_1 17656 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { 17657 continue 17658 } 17659 r0 := sh.Args[0] 17660 if r0.Op != OpAMD64BSWAPL { 17661 continue 17662 } 17663 x0 := r0.Args[0] 17664 if x0.Op != OpAMD64MOVLload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { 17665 continue 17666 } 17667 _ = x0.Args[1] 17668 p0 := x0.Args[0] 17669 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && 
sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) { 17670 continue 17671 } 17672 b = mergePoint(b, x0, x1) 17673 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) 17674 v.copyOf(v0) 17675 v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) 17676 v1.AuxInt = int32ToAuxInt(i) 17677 v1.Aux = symToAux(s) 17678 v1.AddArg2(p0, mem) 17679 v0.AddArg(v1) 17680 return true 17681 } 17682 break 17683 } 17684 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 17685 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 17686 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 17687 for { 17688 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17689 s0 := v_0 17690 if s0.Op != OpAMD64SHLQconst { 17691 continue 17692 } 17693 j0 := auxIntToInt8(s0.AuxInt) 17694 x0 := s0.Args[0] 17695 if x0.Op != OpAMD64MOVBload { 17696 continue 17697 } 17698 i0 := auxIntToInt32(x0.AuxInt) 17699 s := auxToSym(x0.Aux) 17700 mem := x0.Args[1] 17701 p := x0.Args[0] 17702 or := v_1 17703 if or.Op != OpAMD64ORQ { 17704 continue 17705 } 17706 _ = or.Args[1] 17707 or_0 := or.Args[0] 17708 or_1 := or.Args[1] 17709 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 17710 s1 := or_0 17711 if s1.Op != OpAMD64SHLQconst { 17712 continue 17713 } 17714 j1 := auxIntToInt8(s1.AuxInt) 17715 x1 := s1.Args[0] 17716 if x1.Op != OpAMD64MOVBload { 17717 continue 17718 } 17719 i1 := auxIntToInt32(x1.AuxInt) 17720 if auxToSym(x1.Aux) != s { 17721 continue 17722 } 17723 _ = x1.Args[1] 17724 if p != x1.Args[0] || mem != x1.Args[1] { 17725 continue 17726 } 17727 y := or_1 17728 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 17729 continue 17730 } 17731 b = mergePoint(b, x0, x1, y) 17732 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 17733 v.copyOf(v0) 17734 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 17735 v1.AuxInt = int8ToAuxInt(j1) 17736 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) 17737 v2.AuxInt = int8ToAuxInt(8) 17738 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 17739 v3.AuxInt = int32ToAuxInt(i0) 17740 v3.Aux = symToAux(s) 17741 v3.AddArg2(p, mem) 17742 v2.AddArg(v3) 17743 v1.AddArg(v2) 17744 v0.AddArg2(v1, y) 17745 return true 17746 } 17747 } 17748 break 17749 } 17750 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) y)) 17751 // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) 17752 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y) 17753 for { 17754 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17755 s0 := v_0 17756 if s0.Op != OpAMD64SHLQconst { 17757 continue 17758 } 17759 j0 := auxIntToInt8(s0.AuxInt) 17760 x0 := s0.Args[0] 17761 if x0.Op != OpAMD64MOVBload { 17762 continue 17763 } 17764 i := auxIntToInt32(x0.AuxInt) 17765 s := auxToSym(x0.Aux) 17766 mem := x0.Args[1] 17767 p0 := x0.Args[0] 17768 
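// The BSWAPL/BSWAPQ rules just above finish the big-endian ladder: once each
// 32-bit half of a value has been collapsed to (BSWAPL (MOVLload ...)), two
// halves at adjacent addresses joined by SHLQconst [32] fuse into a single
// 64-bit load plus one BSWAPQ. Roughly, decoding in the shape used by
// encoding/binary's big-endian path, e.g.
//
//	func load64be(b []byte) uint64 {
//		_ = b[7]
//		return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 |
//			uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 |
//			uint64(b[1])<<48 | uint64(b[0])<<56
//	}
//
// is expected to reduce to a MOVQ load plus BSWAPQ after the byte-, word- and
// long-merging rules above have each fired (load64be is an illustrative name).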
or := v_1 17769 if or.Op != OpAMD64ORQ { 17770 continue 17771 } 17772 _ = or.Args[1] 17773 or_0 := or.Args[0] 17774 or_1 := or.Args[1] 17775 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 17776 s1 := or_0 17777 if s1.Op != OpAMD64SHLQconst { 17778 continue 17779 } 17780 j1 := auxIntToInt8(s1.AuxInt) 17781 x1 := s1.Args[0] 17782 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 17783 continue 17784 } 17785 _ = x1.Args[1] 17786 p1 := x1.Args[0] 17787 if mem != x1.Args[1] { 17788 continue 17789 } 17790 y := or_1 17791 if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) { 17792 continue 17793 } 17794 b = mergePoint(b, x0, x1, y) 17795 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 17796 v.copyOf(v0) 17797 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 17798 v1.AuxInt = int8ToAuxInt(j1) 17799 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) 17800 v2.AuxInt = int8ToAuxInt(8) 17801 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 17802 v3.AuxInt = int32ToAuxInt(i) 17803 v3.Aux = symToAux(s) 17804 v3.AddArg2(p0, mem) 17805 v2.AddArg(v3) 17806 v1.AddArg(v2) 17807 v0.AddArg2(v1, y) 17808 return true 17809 } 17810 } 17811 break 17812 } 17813 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 17814 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or) 17815 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 17816 for { 17817 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17818 s0 := v_0 17819 if s0.Op != OpAMD64SHLQconst { 17820 continue 17821 } 17822 j0 := auxIntToInt8(s0.AuxInt) 17823 r0 := s0.Args[0] 17824 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { 17825 continue 17826 } 17827 x0 := r0.Args[0] 17828 if x0.Op != OpAMD64MOVWload { 17829 continue 17830 } 17831 i0 := auxIntToInt32(x0.AuxInt) 17832 s := auxToSym(x0.Aux) 17833 mem := x0.Args[1] 17834 p := x0.Args[0] 17835 or := v_1 17836 if or.Op != OpAMD64ORQ { 17837 continue 17838 } 17839 _ = or.Args[1] 17840 or_0 := or.Args[0] 17841 or_1 := or.Args[1] 17842 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 17843 s1 := or_0 17844 if s1.Op != OpAMD64SHLQconst { 17845 continue 17846 } 17847 j1 := auxIntToInt8(s1.AuxInt) 17848 r1 := s1.Args[0] 17849 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { 17850 continue 17851 } 17852 x1 := r1.Args[0] 17853 if x1.Op != OpAMD64MOVWload { 17854 continue 17855 } 17856 i1 := auxIntToInt32(x1.AuxInt) 17857 if auxToSym(x1.Aux) != s { 17858 continue 17859 } 17860 _ = x1.Args[1] 17861 if p != x1.Args[0] || mem != x1.Args[1] { 17862 continue 17863 } 17864 y := or_1 17865 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) { 17866 continue 17867 } 17868 b = mergePoint(b, x0, x1, y) 17869 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 17870 v.copyOf(v0) 17871 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 
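// Every load-merging rule in this function follows the same protocol, visible
// in the replacement being built here: mergePoint(b, x0, x1, ...) picks a
// block that dominates all the values being replaced, so the single wide load
// has a legal home (a nil result vetoes the rewrite), and clobber(...) kills
// the values the match consumed so deadcode can reclaim them. clobber always
// returns true precisely so it can ride along in the && chain of the rule's
// condition; a paraphrase of its definition in rewrite.go (not verbatim):
//
//	func clobber(vv ...*Value) bool {
//		for _, v := range vv {
//			v.reset(OpInvalid) // drop argument uses; the value is now dead
//		}
//		return true // composes with && in generated conditions
//	}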
17872 v1.AuxInt = int8ToAuxInt(j1) 17873 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) 17874 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 17875 v3.AuxInt = int32ToAuxInt(i0) 17876 v3.Aux = symToAux(s) 17877 v3.AddArg2(p, mem) 17878 v2.AddArg(v3) 17879 v1.AddArg(v2) 17880 v0.AddArg2(v1, y) 17881 return true 17882 } 17883 } 17884 break 17885 } 17886 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))) y)) 17887 // cond: j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or) 17888 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y) 17889 for { 17890 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17891 s0 := v_0 17892 if s0.Op != OpAMD64SHLQconst { 17893 continue 17894 } 17895 j0 := auxIntToInt8(s0.AuxInt) 17896 r0 := s0.Args[0] 17897 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { 17898 continue 17899 } 17900 x0 := r0.Args[0] 17901 if x0.Op != OpAMD64MOVWload { 17902 continue 17903 } 17904 i := auxIntToInt32(x0.AuxInt) 17905 s := auxToSym(x0.Aux) 17906 mem := x0.Args[1] 17907 p0 := x0.Args[0] 17908 or := v_1 17909 if or.Op != OpAMD64ORQ { 17910 continue 17911 } 17912 _ = or.Args[1] 17913 or_0 := or.Args[0] 17914 or_1 := or.Args[1] 17915 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 { 17916 s1 := or_0 17917 if s1.Op != OpAMD64SHLQconst { 17918 continue 17919 } 17920 j1 := auxIntToInt8(s1.AuxInt) 17921 r1 := s1.Args[0] 17922 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { 17923 continue 17924 } 17925 x1 := r1.Args[0] 17926 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 17927 continue 17928 } 17929 _ = x1.Args[1] 17930 p1 := x1.Args[0] 17931 if mem != x1.Args[1] { 17932 continue 17933 } 17934 y := or_1 17935 if !(j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) { 17936 continue 17937 } 17938 b = mergePoint(b, x0, x1, y) 17939 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 17940 v.copyOf(v0) 17941 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 17942 v1.AuxInt = int8ToAuxInt(j1) 17943 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) 17944 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 17945 v3.AuxInt = int32ToAuxInt(i) 17946 v3.Aux = symToAux(s) 17947 v3.AddArg2(p0, mem) 17948 v2.AddArg(v3) 17949 v1.AddArg(v2) 17950 v0.AddArg2(v1, y) 17951 return true 17952 } 17953 } 17954 break 17955 } 17956 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) 17957 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 17958 // result: (ORQload x [off] {sym} ptr mem) 17959 for { 17960 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17961 x := v_0 17962 l := v_1 17963 if l.Op != OpAMD64MOVQload { 17964 continue 17965 } 17966 off := auxIntToInt32(l.AuxInt) 17967 sym := auxToSym(l.Aux) 17968 mem := l.Args[1] 17969 ptr := l.Args[0] 17970 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 17971 continue 17972 } 17973 v.reset(OpAMD64ORQload) 17974 v.AuxInt = int32ToAuxInt(off) 17975 v.Aux = symToAux(sym) 17976 v.AddArg3(x, ptr, 
mem) 17977 return true 17978 } 17979 break 17980 } 17981 // match: (ORQ x0:(MOVBELload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVBELload [i1] {s} p mem))) 17982 // cond: i0 == i1+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 17983 // result: @mergePoint(b,x0,x1) (MOVBEQload [i1] {s} p mem) 17984 for { 17985 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 17986 x0 := v_0 17987 if x0.Op != OpAMD64MOVBELload { 17988 continue 17989 } 17990 i0 := auxIntToInt32(x0.AuxInt) 17991 s := auxToSym(x0.Aux) 17992 mem := x0.Args[1] 17993 p := x0.Args[0] 17994 sh := v_1 17995 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { 17996 continue 17997 } 17998 x1 := sh.Args[0] 17999 if x1.Op != OpAMD64MOVBELload { 18000 continue 18001 } 18002 i1 := auxIntToInt32(x1.AuxInt) 18003 if auxToSym(x1.Aux) != s { 18004 continue 18005 } 18006 _ = x1.Args[1] 18007 if p != x1.Args[0] || mem != x1.Args[1] || !(i0 == i1+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 18008 continue 18009 } 18010 b = mergePoint(b, x0, x1) 18011 v0 := b.NewValue0(x1.Pos, OpAMD64MOVBEQload, typ.UInt64) 18012 v.copyOf(v0) 18013 v0.AuxInt = int32ToAuxInt(i1) 18014 v0.Aux = symToAux(s) 18015 v0.AddArg2(p, mem) 18016 return true 18017 } 18018 break 18019 } 18020 // match: (ORQ x0:(MOVBELload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVBELload [i] {s} p1 mem))) 18021 // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p1, p0, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) 18022 // result: @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem) 18023 for { 18024 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 18025 x0 := v_0 18026 if x0.Op != OpAMD64MOVBELload { 18027 continue 18028 } 18029 i := auxIntToInt32(x0.AuxInt) 18030 s := auxToSym(x0.Aux) 18031 mem := x0.Args[1] 18032 p0 := x0.Args[0] 18033 sh := v_1 18034 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { 18035 continue 18036 } 18037 x1 := sh.Args[0] 18038 if x1.Op != OpAMD64MOVBELload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { 18039 continue 18040 } 18041 _ = x1.Args[1] 18042 p1 := x1.Args[0] 18043 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p1, p0, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { 18044 continue 18045 } 18046 b = mergePoint(b, x0, x1) 18047 v0 := b.NewValue0(x1.Pos, OpAMD64MOVBEQload, typ.UInt64) 18048 v.copyOf(v0) 18049 v0.AuxInt = int32ToAuxInt(i) 18050 v0.Aux = symToAux(s) 18051 v0.AddArg2(p1, mem) 18052 return true 18053 } 18054 break 18055 } 18056 return false 18057 } 18058 func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool { 18059 v_0 := v.Args[0] 18060 // match: (ORQconst [c] x) 18061 // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128 18062 // result: (BTSQconst [int8(log32(c))] x) 18063 for { 18064 c := auxIntToInt32(v.AuxInt) 18065 x := v_0 18066 if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) { 18067 break 18068 } 18069 v.reset(OpAMD64BTSQconst) 18070 v.AuxInt = int8ToAuxInt(int8(log32(c))) 18071 v.AddArg(x) 18072 return true 18073 } 18074 // match: (ORQconst [c] (ORQconst [d] x)) 18075 // result: (ORQconst [c | d] x) 18076 for { 18077 c := auxIntToInt32(v.AuxInt) 18078 if v_0.Op != OpAMD64ORQconst { 18079 break 18080 } 18081 d := auxIntToInt32(v_0.AuxInt) 18082 x := v_0.Args[0] 18083 v.reset(OpAMD64ORQconst) 18084 v.AuxInt = int32ToAuxInt(c | d) 18085 v.AddArg(x) 18086 return true 
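// In rewriteValueAMD64_OpAMD64ORQconst, a single-bit constant at or above
// 1<<7 turns into BTSQconst: OR's immediate is sign-extended from 8 bits, so
// for bits 0..6 the OR encoding is already minimal, while BTS takes the bit
// index as imm8 and wins for higher bits. Successive constant ORs also fold
// together, as in this sketch (names illustrative):
//
//	a := v | 0x1000  // (ORQconst [4096] v)  → (BTSQconst [12] v)
//	b := (v | 2) | 1 // (ORQconst [1] (ORQconst [2] v)) → (ORQconst [3] v)
//
// The rules below additionally fold an OR constant back through an earlier
// BTSQconst and handle the identities: OR with 0 is the operand itself, OR
// with -1 is the constant -1, and OR of two constants folds completely.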
18087 } 18088 // match: (ORQconst [c] (BTSQconst [d] x)) 18089 // cond: is32Bit(int64(c) | 1<<uint32(d)) 18090 // result: (ORQconst [c | 1<<uint32(d)] x) 18091 for { 18092 c := auxIntToInt32(v.AuxInt) 18093 if v_0.Op != OpAMD64BTSQconst { 18094 break 18095 } 18096 d := auxIntToInt8(v_0.AuxInt) 18097 x := v_0.Args[0] 18098 if !(is32Bit(int64(c) | 1<<uint32(d))) { 18099 break 18100 } 18101 v.reset(OpAMD64ORQconst) 18102 v.AuxInt = int32ToAuxInt(c | 1<<uint32(d)) 18103 v.AddArg(x) 18104 return true 18105 } 18106 // match: (ORQconst [0] x) 18107 // result: x 18108 for { 18109 if auxIntToInt32(v.AuxInt) != 0 { 18110 break 18111 } 18112 x := v_0 18113 v.copyOf(x) 18114 return true 18115 } 18116 // match: (ORQconst [-1] _) 18117 // result: (MOVQconst [-1]) 18118 for { 18119 if auxIntToInt32(v.AuxInt) != -1 { 18120 break 18121 } 18122 v.reset(OpAMD64MOVQconst) 18123 v.AuxInt = int64ToAuxInt(-1) 18124 return true 18125 } 18126 // match: (ORQconst [c] (MOVQconst [d])) 18127 // result: (MOVQconst [int64(c)|d]) 18128 for { 18129 c := auxIntToInt32(v.AuxInt) 18130 if v_0.Op != OpAMD64MOVQconst { 18131 break 18132 } 18133 d := auxIntToInt64(v_0.AuxInt) 18134 v.reset(OpAMD64MOVQconst) 18135 v.AuxInt = int64ToAuxInt(int64(c) | d) 18136 return true 18137 } 18138 return false 18139 } 18140 func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool { 18141 v_1 := v.Args[1] 18142 v_0 := v.Args[0] 18143 // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) 18144 // cond: ValAndOff(valoff1).canAdd32(off2) 18145 // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) 18146 for { 18147 valoff1 := auxIntToValAndOff(v.AuxInt) 18148 sym := auxToSym(v.Aux) 18149 if v_0.Op != OpAMD64ADDQconst { 18150 break 18151 } 18152 off2 := auxIntToInt32(v_0.AuxInt) 18153 base := v_0.Args[0] 18154 mem := v_1 18155 if !(ValAndOff(valoff1).canAdd32(off2)) { 18156 break 18157 } 18158 v.reset(OpAMD64ORQconstmodify) 18159 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 18160 v.Aux = symToAux(sym) 18161 v.AddArg2(base, mem) 18162 return true 18163 } 18164 // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 18165 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) 18166 // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) 18167 for { 18168 valoff1 := auxIntToValAndOff(v.AuxInt) 18169 sym1 := auxToSym(v.Aux) 18170 if v_0.Op != OpAMD64LEAQ { 18171 break 18172 } 18173 off2 := auxIntToInt32(v_0.AuxInt) 18174 sym2 := auxToSym(v_0.Aux) 18175 base := v_0.Args[0] 18176 mem := v_1 18177 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { 18178 break 18179 } 18180 v.reset(OpAMD64ORQconstmodify) 18181 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 18182 v.Aux = symToAux(mergeSym(sym1, sym2)) 18183 v.AddArg2(base, mem) 18184 return true 18185 } 18186 return false 18187 } 18188 func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { 18189 v_2 := v.Args[2] 18190 v_1 := v.Args[1] 18191 v_0 := v.Args[0] 18192 b := v.Block 18193 typ := &b.Func.Config.Types 18194 // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem) 18195 // cond: is32Bit(int64(off1)+int64(off2)) 18196 // result: (ORQload [off1+off2] {sym} val base mem) 18197 for { 18198 off1 := auxIntToInt32(v.AuxInt) 18199 sym := auxToSym(v.Aux) 18200 val := v_0 18201 if v_1.Op != OpAMD64ADDQconst { 18202 break 18203 } 18204 off2 := auxIntToInt32(v_1.AuxInt) 18205 base := v_1.Args[0] 18206 mem := v_2 18207 if 
!(is32Bit(int64(off1) + int64(off2))) { 18208 break 18209 } 18210 v.reset(OpAMD64ORQload) 18211 v.AuxInt = int32ToAuxInt(off1 + off2) 18212 v.Aux = symToAux(sym) 18213 v.AddArg3(val, base, mem) 18214 return true 18215 } 18216 // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 18217 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 18218 // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 18219 for { 18220 off1 := auxIntToInt32(v.AuxInt) 18221 sym1 := auxToSym(v.Aux) 18222 val := v_0 18223 if v_1.Op != OpAMD64LEAQ { 18224 break 18225 } 18226 off2 := auxIntToInt32(v_1.AuxInt) 18227 sym2 := auxToSym(v_1.Aux) 18228 base := v_1.Args[0] 18229 mem := v_2 18230 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 18231 break 18232 } 18233 v.reset(OpAMD64ORQload) 18234 v.AuxInt = int32ToAuxInt(off1 + off2) 18235 v.Aux = symToAux(mergeSym(sym1, sym2)) 18236 v.AddArg3(val, base, mem) 18237 return true 18238 } 18239 // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 18240 // result: ( ORQ x (MOVQf2i y)) 18241 for { 18242 off := auxIntToInt32(v.AuxInt) 18243 sym := auxToSym(v.Aux) 18244 x := v_0 18245 ptr := v_1 18246 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { 18247 break 18248 } 18249 y := v_2.Args[1] 18250 if ptr != v_2.Args[0] { 18251 break 18252 } 18253 v.reset(OpAMD64ORQ) 18254 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) 18255 v0.AddArg(y) 18256 v.AddArg2(x, v0) 18257 return true 18258 } 18259 return false 18260 } 18261 func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool { 18262 v_2 := v.Args[2] 18263 v_1 := v.Args[1] 18264 v_0 := v.Args[0] 18265 // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 18266 // cond: is32Bit(int64(off1)+int64(off2)) 18267 // result: (ORQmodify [off1+off2] {sym} base val mem) 18268 for { 18269 off1 := auxIntToInt32(v.AuxInt) 18270 sym := auxToSym(v.Aux) 18271 if v_0.Op != OpAMD64ADDQconst { 18272 break 18273 } 18274 off2 := auxIntToInt32(v_0.AuxInt) 18275 base := v_0.Args[0] 18276 val := v_1 18277 mem := v_2 18278 if !(is32Bit(int64(off1) + int64(off2))) { 18279 break 18280 } 18281 v.reset(OpAMD64ORQmodify) 18282 v.AuxInt = int32ToAuxInt(off1 + off2) 18283 v.Aux = symToAux(sym) 18284 v.AddArg3(base, val, mem) 18285 return true 18286 } 18287 // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 18288 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 18289 // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 18290 for { 18291 off1 := auxIntToInt32(v.AuxInt) 18292 sym1 := auxToSym(v.Aux) 18293 if v_0.Op != OpAMD64LEAQ { 18294 break 18295 } 18296 off2 := auxIntToInt32(v_0.AuxInt) 18297 sym2 := auxToSym(v_0.Aux) 18298 base := v_0.Args[0] 18299 val := v_1 18300 mem := v_2 18301 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 18302 break 18303 } 18304 v.reset(OpAMD64ORQmodify) 18305 v.AuxInt = int32ToAuxInt(off1 + off2) 18306 v.Aux = symToAux(mergeSym(sym1, sym2)) 18307 v.AddArg3(base, val, mem) 18308 return true 18309 } 18310 return false 18311 } 18312 func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool { 18313 v_1 := v.Args[1] 18314 v_0 := v.Args[0] 18315 // match: (ROLB x (NEGQ y)) 18316 // result: (RORB x y) 18317 for { 18318 x := v_0 18319 if v_1.Op != OpAMD64NEGQ { 18320 break 18321 } 18322 y := v_1.Args[0] 18323 v.reset(OpAMD64RORB) 18324 v.AddArg2(x, y) 18325 return true 18326 } 18327 // match: (ROLB x (NEGL y)) 18328 // result: 
(RORB x y) 18329 for { 18330 x := v_0 18331 if v_1.Op != OpAMD64NEGL { 18332 break 18333 } 18334 y := v_1.Args[0] 18335 v.reset(OpAMD64RORB) 18336 v.AddArg2(x, y) 18337 return true 18338 } 18339 // match: (ROLB x (MOVQconst [c])) 18340 // result: (ROLBconst [int8(c&7) ] x) 18341 for { 18342 x := v_0 18343 if v_1.Op != OpAMD64MOVQconst { 18344 break 18345 } 18346 c := auxIntToInt64(v_1.AuxInt) 18347 v.reset(OpAMD64ROLBconst) 18348 v.AuxInt = int8ToAuxInt(int8(c & 7)) 18349 v.AddArg(x) 18350 return true 18351 } 18352 // match: (ROLB x (MOVLconst [c])) 18353 // result: (ROLBconst [int8(c&7) ] x) 18354 for { 18355 x := v_0 18356 if v_1.Op != OpAMD64MOVLconst { 18357 break 18358 } 18359 c := auxIntToInt32(v_1.AuxInt) 18360 v.reset(OpAMD64ROLBconst) 18361 v.AuxInt = int8ToAuxInt(int8(c & 7)) 18362 v.AddArg(x) 18363 return true 18364 } 18365 return false 18366 } 18367 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool { 18368 v_0 := v.Args[0] 18369 // match: (ROLBconst x [0]) 18370 // result: x 18371 for { 18372 if auxIntToInt8(v.AuxInt) != 0 { 18373 break 18374 } 18375 x := v_0 18376 v.copyOf(x) 18377 return true 18378 } 18379 return false 18380 } 18381 func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool { 18382 v_1 := v.Args[1] 18383 v_0 := v.Args[0] 18384 // match: (ROLL x (NEGQ y)) 18385 // result: (RORL x y) 18386 for { 18387 x := v_0 18388 if v_1.Op != OpAMD64NEGQ { 18389 break 18390 } 18391 y := v_1.Args[0] 18392 v.reset(OpAMD64RORL) 18393 v.AddArg2(x, y) 18394 return true 18395 } 18396 // match: (ROLL x (NEGL y)) 18397 // result: (RORL x y) 18398 for { 18399 x := v_0 18400 if v_1.Op != OpAMD64NEGL { 18401 break 18402 } 18403 y := v_1.Args[0] 18404 v.reset(OpAMD64RORL) 18405 v.AddArg2(x, y) 18406 return true 18407 } 18408 // match: (ROLL x (MOVQconst [c])) 18409 // result: (ROLLconst [int8(c&31)] x) 18410 for { 18411 x := v_0 18412 if v_1.Op != OpAMD64MOVQconst { 18413 break 18414 } 18415 c := auxIntToInt64(v_1.AuxInt) 18416 v.reset(OpAMD64ROLLconst) 18417 v.AuxInt = int8ToAuxInt(int8(c & 31)) 18418 v.AddArg(x) 18419 return true 18420 } 18421 // match: (ROLL x (MOVLconst [c])) 18422 // result: (ROLLconst [int8(c&31)] x) 18423 for { 18424 x := v_0 18425 if v_1.Op != OpAMD64MOVLconst { 18426 break 18427 } 18428 c := auxIntToInt32(v_1.AuxInt) 18429 v.reset(OpAMD64ROLLconst) 18430 v.AuxInt = int8ToAuxInt(int8(c & 31)) 18431 v.AddArg(x) 18432 return true 18433 } 18434 return false 18435 } 18436 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool { 18437 v_0 := v.Args[0] 18438 // match: (ROLLconst x [0]) 18439 // result: x 18440 for { 18441 if auxIntToInt8(v.AuxInt) != 0 { 18442 break 18443 } 18444 x := v_0 18445 v.copyOf(x) 18446 return true 18447 } 18448 return false 18449 } 18450 func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool { 18451 v_1 := v.Args[1] 18452 v_0 := v.Args[0] 18453 // match: (ROLQ x (NEGQ y)) 18454 // result: (RORQ x y) 18455 for { 18456 x := v_0 18457 if v_1.Op != OpAMD64NEGQ { 18458 break 18459 } 18460 y := v_1.Args[0] 18461 v.reset(OpAMD64RORQ) 18462 v.AddArg2(x, y) 18463 return true 18464 } 18465 // match: (ROLQ x (NEGL y)) 18466 // result: (RORQ x y) 18467 for { 18468 x := v_0 18469 if v_1.Op != OpAMD64NEGL { 18470 break 18471 } 18472 y := v_1.Args[0] 18473 v.reset(OpAMD64RORQ) 18474 v.AddArg2(x, y) 18475 return true 18476 } 18477 // match: (ROLQ x (MOVQconst [c])) 18478 // result: (ROLQconst [int8(c&63)] x) 18479 for { 18480 x := v_0 18481 if v_1.Op != OpAMD64MOVQconst { 18482 break 18483 } 18484 c := auxIntToInt64(v_1.AuxInt) 18485 v.reset(OpAMD64ROLQconst) 18486 
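// The rotate rules normalize in two directions. A rotate by a negated count
// flips to the opposite rotation (ROLQ x (NEGQ y) → RORQ x y; the RORx
// functions below flip back the same way), and a rotate by a constant becomes
// the ...const form with the count masked to the operand width: c&63 here,
// and c&31, c&15, c&7 for the 32-, 16- and 8-bit variants. A right rotate
// written with the math/bits intrinsic, e.g.
//
//	func ror(x uint64, k int) uint64 {
//		return bits.RotateLeft64(x, -k)
//	}
//
// is a sketch of where the NEGQ feeding these rules typically comes from:
// assuming the usual intrinsic lowering to ROLQ of a negated count, the
// negation is absorbed into the direction of the rotate instead of costing a
// NEG instruction.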
v.AuxInt = int8ToAuxInt(int8(c & 63)) 18487 v.AddArg(x) 18488 return true 18489 } 18490 // match: (ROLQ x (MOVLconst [c])) 18491 // result: (ROLQconst [int8(c&63)] x) 18492 for { 18493 x := v_0 18494 if v_1.Op != OpAMD64MOVLconst { 18495 break 18496 } 18497 c := auxIntToInt32(v_1.AuxInt) 18498 v.reset(OpAMD64ROLQconst) 18499 v.AuxInt = int8ToAuxInt(int8(c & 63)) 18500 v.AddArg(x) 18501 return true 18502 } 18503 return false 18504 } 18505 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool { 18506 v_0 := v.Args[0] 18507 // match: (ROLQconst x [0]) 18508 // result: x 18509 for { 18510 if auxIntToInt8(v.AuxInt) != 0 { 18511 break 18512 } 18513 x := v_0 18514 v.copyOf(x) 18515 return true 18516 } 18517 return false 18518 } 18519 func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool { 18520 v_1 := v.Args[1] 18521 v_0 := v.Args[0] 18522 // match: (ROLW x (NEGQ y)) 18523 // result: (RORW x y) 18524 for { 18525 x := v_0 18526 if v_1.Op != OpAMD64NEGQ { 18527 break 18528 } 18529 y := v_1.Args[0] 18530 v.reset(OpAMD64RORW) 18531 v.AddArg2(x, y) 18532 return true 18533 } 18534 // match: (ROLW x (NEGL y)) 18535 // result: (RORW x y) 18536 for { 18537 x := v_0 18538 if v_1.Op != OpAMD64NEGL { 18539 break 18540 } 18541 y := v_1.Args[0] 18542 v.reset(OpAMD64RORW) 18543 v.AddArg2(x, y) 18544 return true 18545 } 18546 // match: (ROLW x (MOVQconst [c])) 18547 // result: (ROLWconst [int8(c&15)] x) 18548 for { 18549 x := v_0 18550 if v_1.Op != OpAMD64MOVQconst { 18551 break 18552 } 18553 c := auxIntToInt64(v_1.AuxInt) 18554 v.reset(OpAMD64ROLWconst) 18555 v.AuxInt = int8ToAuxInt(int8(c & 15)) 18556 v.AddArg(x) 18557 return true 18558 } 18559 // match: (ROLW x (MOVLconst [c])) 18560 // result: (ROLWconst [int8(c&15)] x) 18561 for { 18562 x := v_0 18563 if v_1.Op != OpAMD64MOVLconst { 18564 break 18565 } 18566 c := auxIntToInt32(v_1.AuxInt) 18567 v.reset(OpAMD64ROLWconst) 18568 v.AuxInt = int8ToAuxInt(int8(c & 15)) 18569 v.AddArg(x) 18570 return true 18571 } 18572 return false 18573 } 18574 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool { 18575 v_0 := v.Args[0] 18576 // match: (ROLWconst x [0]) 18577 // result: x 18578 for { 18579 if auxIntToInt8(v.AuxInt) != 0 { 18580 break 18581 } 18582 x := v_0 18583 v.copyOf(x) 18584 return true 18585 } 18586 return false 18587 } 18588 func rewriteValueAMD64_OpAMD64RORB(v *Value) bool { 18589 v_1 := v.Args[1] 18590 v_0 := v.Args[0] 18591 // match: (RORB x (NEGQ y)) 18592 // result: (ROLB x y) 18593 for { 18594 x := v_0 18595 if v_1.Op != OpAMD64NEGQ { 18596 break 18597 } 18598 y := v_1.Args[0] 18599 v.reset(OpAMD64ROLB) 18600 v.AddArg2(x, y) 18601 return true 18602 } 18603 // match: (RORB x (NEGL y)) 18604 // result: (ROLB x y) 18605 for { 18606 x := v_0 18607 if v_1.Op != OpAMD64NEGL { 18608 break 18609 } 18610 y := v_1.Args[0] 18611 v.reset(OpAMD64ROLB) 18612 v.AddArg2(x, y) 18613 return true 18614 } 18615 // match: (RORB x (MOVQconst [c])) 18616 // result: (ROLBconst [int8((-c)&7) ] x) 18617 for { 18618 x := v_0 18619 if v_1.Op != OpAMD64MOVQconst { 18620 break 18621 } 18622 c := auxIntToInt64(v_1.AuxInt) 18623 v.reset(OpAMD64ROLBconst) 18624 v.AuxInt = int8ToAuxInt(int8((-c) & 7)) 18625 v.AddArg(x) 18626 return true 18627 } 18628 // match: (RORB x (MOVLconst [c])) 18629 // result: (ROLBconst [int8((-c)&7) ] x) 18630 for { 18631 x := v_0 18632 if v_1.Op != OpAMD64MOVLconst { 18633 break 18634 } 18635 c := auxIntToInt32(v_1.AuxInt) 18636 v.reset(OpAMD64ROLBconst) 18637 v.AuxInt = int8ToAuxInt(int8((-c) & 7)) 18638 v.AddArg(x) 18639 return true 18640 } 18641 return 
false 18642 } 18643 func rewriteValueAMD64_OpAMD64RORL(v *Value) bool { 18644 v_1 := v.Args[1] 18645 v_0 := v.Args[0] 18646 // match: (RORL x (NEGQ y)) 18647 // result: (ROLL x y) 18648 for { 18649 x := v_0 18650 if v_1.Op != OpAMD64NEGQ { 18651 break 18652 } 18653 y := v_1.Args[0] 18654 v.reset(OpAMD64ROLL) 18655 v.AddArg2(x, y) 18656 return true 18657 } 18658 // match: (RORL x (NEGL y)) 18659 // result: (ROLL x y) 18660 for { 18661 x := v_0 18662 if v_1.Op != OpAMD64NEGL { 18663 break 18664 } 18665 y := v_1.Args[0] 18666 v.reset(OpAMD64ROLL) 18667 v.AddArg2(x, y) 18668 return true 18669 } 18670 // match: (RORL x (MOVQconst [c])) 18671 // result: (ROLLconst [int8((-c)&31)] x) 18672 for { 18673 x := v_0 18674 if v_1.Op != OpAMD64MOVQconst { 18675 break 18676 } 18677 c := auxIntToInt64(v_1.AuxInt) 18678 v.reset(OpAMD64ROLLconst) 18679 v.AuxInt = int8ToAuxInt(int8((-c) & 31)) 18680 v.AddArg(x) 18681 return true 18682 } 18683 // match: (RORL x (MOVLconst [c])) 18684 // result: (ROLLconst [int8((-c)&31)] x) 18685 for { 18686 x := v_0 18687 if v_1.Op != OpAMD64MOVLconst { 18688 break 18689 } 18690 c := auxIntToInt32(v_1.AuxInt) 18691 v.reset(OpAMD64ROLLconst) 18692 v.AuxInt = int8ToAuxInt(int8((-c) & 31)) 18693 v.AddArg(x) 18694 return true 18695 } 18696 return false 18697 } 18698 func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool { 18699 v_1 := v.Args[1] 18700 v_0 := v.Args[0] 18701 // match: (RORQ x (NEGQ y)) 18702 // result: (ROLQ x y) 18703 for { 18704 x := v_0 18705 if v_1.Op != OpAMD64NEGQ { 18706 break 18707 } 18708 y := v_1.Args[0] 18709 v.reset(OpAMD64ROLQ) 18710 v.AddArg2(x, y) 18711 return true 18712 } 18713 // match: (RORQ x (NEGL y)) 18714 // result: (ROLQ x y) 18715 for { 18716 x := v_0 18717 if v_1.Op != OpAMD64NEGL { 18718 break 18719 } 18720 y := v_1.Args[0] 18721 v.reset(OpAMD64ROLQ) 18722 v.AddArg2(x, y) 18723 return true 18724 } 18725 // match: (RORQ x (MOVQconst [c])) 18726 // result: (ROLQconst [int8((-c)&63)] x) 18727 for { 18728 x := v_0 18729 if v_1.Op != OpAMD64MOVQconst { 18730 break 18731 } 18732 c := auxIntToInt64(v_1.AuxInt) 18733 v.reset(OpAMD64ROLQconst) 18734 v.AuxInt = int8ToAuxInt(int8((-c) & 63)) 18735 v.AddArg(x) 18736 return true 18737 } 18738 // match: (RORQ x (MOVLconst [c])) 18739 // result: (ROLQconst [int8((-c)&63)] x) 18740 for { 18741 x := v_0 18742 if v_1.Op != OpAMD64MOVLconst { 18743 break 18744 } 18745 c := auxIntToInt32(v_1.AuxInt) 18746 v.reset(OpAMD64ROLQconst) 18747 v.AuxInt = int8ToAuxInt(int8((-c) & 63)) 18748 v.AddArg(x) 18749 return true 18750 } 18751 return false 18752 } 18753 func rewriteValueAMD64_OpAMD64RORW(v *Value) bool { 18754 v_1 := v.Args[1] 18755 v_0 := v.Args[0] 18756 // match: (RORW x (NEGQ y)) 18757 // result: (ROLW x y) 18758 for { 18759 x := v_0 18760 if v_1.Op != OpAMD64NEGQ { 18761 break 18762 } 18763 y := v_1.Args[0] 18764 v.reset(OpAMD64ROLW) 18765 v.AddArg2(x, y) 18766 return true 18767 } 18768 // match: (RORW x (NEGL y)) 18769 // result: (ROLW x y) 18770 for { 18771 x := v_0 18772 if v_1.Op != OpAMD64NEGL { 18773 break 18774 } 18775 y := v_1.Args[0] 18776 v.reset(OpAMD64ROLW) 18777 v.AddArg2(x, y) 18778 return true 18779 } 18780 // match: (RORW x (MOVQconst [c])) 18781 // result: (ROLWconst [int8((-c)&15)] x) 18782 for { 18783 x := v_0 18784 if v_1.Op != OpAMD64MOVQconst { 18785 break 18786 } 18787 c := auxIntToInt64(v_1.AuxInt) 18788 v.reset(OpAMD64ROLWconst) 18789 v.AuxInt = int8ToAuxInt(int8((-c) & 15)) 18790 v.AddArg(x) 18791 return true 18792 } 18793 // match: (RORW x (MOVLconst [c])) 18794 // result: (ROLWconst 
[int8((-c)&15)] x) 18795 for { 18796 x := v_0 18797 if v_1.Op != OpAMD64MOVLconst { 18798 break 18799 } 18800 c := auxIntToInt32(v_1.AuxInt) 18801 v.reset(OpAMD64ROLWconst) 18802 v.AuxInt = int8ToAuxInt(int8((-c) & 15)) 18803 v.AddArg(x) 18804 return true 18805 } 18806 return false 18807 } 18808 func rewriteValueAMD64_OpAMD64SARB(v *Value) bool { 18809 v_1 := v.Args[1] 18810 v_0 := v.Args[0] 18811 // match: (SARB x (MOVQconst [c])) 18812 // result: (SARBconst [int8(min(int64(c)&31,7))] x) 18813 for { 18814 x := v_0 18815 if v_1.Op != OpAMD64MOVQconst { 18816 break 18817 } 18818 c := auxIntToInt64(v_1.AuxInt) 18819 v.reset(OpAMD64SARBconst) 18820 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7))) 18821 v.AddArg(x) 18822 return true 18823 } 18824 // match: (SARB x (MOVLconst [c])) 18825 // result: (SARBconst [int8(min(int64(c)&31,7))] x) 18826 for { 18827 x := v_0 18828 if v_1.Op != OpAMD64MOVLconst { 18829 break 18830 } 18831 c := auxIntToInt32(v_1.AuxInt) 18832 v.reset(OpAMD64SARBconst) 18833 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7))) 18834 v.AddArg(x) 18835 return true 18836 } 18837 return false 18838 } 18839 func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool { 18840 v_0 := v.Args[0] 18841 // match: (SARBconst x [0]) 18842 // result: x 18843 for { 18844 if auxIntToInt8(v.AuxInt) != 0 { 18845 break 18846 } 18847 x := v_0 18848 v.copyOf(x) 18849 return true 18850 } 18851 // match: (SARBconst [c] (MOVQconst [d])) 18852 // result: (MOVQconst [int64(int8(d))>>uint64(c)]) 18853 for { 18854 c := auxIntToInt8(v.AuxInt) 18855 if v_0.Op != OpAMD64MOVQconst { 18856 break 18857 } 18858 d := auxIntToInt64(v_0.AuxInt) 18859 v.reset(OpAMD64MOVQconst) 18860 v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c)) 18861 return true 18862 } 18863 return false 18864 } 18865 func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { 18866 v_1 := v.Args[1] 18867 v_0 := v.Args[0] 18868 b := v.Block 18869 // match: (SARL x (MOVQconst [c])) 18870 // result: (SARLconst [int8(c&31)] x) 18871 for { 18872 x := v_0 18873 if v_1.Op != OpAMD64MOVQconst { 18874 break 18875 } 18876 c := auxIntToInt64(v_1.AuxInt) 18877 v.reset(OpAMD64SARLconst) 18878 v.AuxInt = int8ToAuxInt(int8(c & 31)) 18879 v.AddArg(x) 18880 return true 18881 } 18882 // match: (SARL x (MOVLconst [c])) 18883 // result: (SARLconst [int8(c&31)] x) 18884 for { 18885 x := v_0 18886 if v_1.Op != OpAMD64MOVLconst { 18887 break 18888 } 18889 c := auxIntToInt32(v_1.AuxInt) 18890 v.reset(OpAMD64SARLconst) 18891 v.AuxInt = int8ToAuxInt(int8(c & 31)) 18892 v.AddArg(x) 18893 return true 18894 } 18895 // match: (SARL x (ADDQconst [c] y)) 18896 // cond: c & 31 == 0 18897 // result: (SARL x y) 18898 for { 18899 x := v_0 18900 if v_1.Op != OpAMD64ADDQconst { 18901 break 18902 } 18903 c := auxIntToInt32(v_1.AuxInt) 18904 y := v_1.Args[0] 18905 if !(c&31 == 0) { 18906 break 18907 } 18908 v.reset(OpAMD64SARL) 18909 v.AddArg2(x, y) 18910 return true 18911 } 18912 // match: (SARL x (NEGQ <t> (ADDQconst [c] y))) 18913 // cond: c & 31 == 0 18914 // result: (SARL x (NEGQ <t> y)) 18915 for { 18916 x := v_0 18917 if v_1.Op != OpAMD64NEGQ { 18918 break 18919 } 18920 t := v_1.Type 18921 v_1_0 := v_1.Args[0] 18922 if v_1_0.Op != OpAMD64ADDQconst { 18923 break 18924 } 18925 c := auxIntToInt32(v_1_0.AuxInt) 18926 y := v_1_0.Args[0] 18927 if !(c&31 == 0) { 18928 break 18929 } 18930 v.reset(OpAMD64SARL) 18931 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 18932 v0.AddArg(y) 18933 v.AddArg2(x, v0) 18934 return true 18935 } 18936 // match: (SARL x (ANDQconst [c] y)) 18937 // cond: c & 31 == 
31 18938 // result: (SARL x y) 18939 for { 18940 x := v_0 18941 if v_1.Op != OpAMD64ANDQconst { 18942 break 18943 } 18944 c := auxIntToInt32(v_1.AuxInt) 18945 y := v_1.Args[0] 18946 if !(c&31 == 31) { 18947 break 18948 } 18949 v.reset(OpAMD64SARL) 18950 v.AddArg2(x, y) 18951 return true 18952 } 18953 // match: (SARL x (NEGQ <t> (ANDQconst [c] y))) 18954 // cond: c & 31 == 31 18955 // result: (SARL x (NEGQ <t> y)) 18956 for { 18957 x := v_0 18958 if v_1.Op != OpAMD64NEGQ { 18959 break 18960 } 18961 t := v_1.Type 18962 v_1_0 := v_1.Args[0] 18963 if v_1_0.Op != OpAMD64ANDQconst { 18964 break 18965 } 18966 c := auxIntToInt32(v_1_0.AuxInt) 18967 y := v_1_0.Args[0] 18968 if !(c&31 == 31) { 18969 break 18970 } 18971 v.reset(OpAMD64SARL) 18972 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 18973 v0.AddArg(y) 18974 v.AddArg2(x, v0) 18975 return true 18976 } 18977 // match: (SARL x (ADDLconst [c] y)) 18978 // cond: c & 31 == 0 18979 // result: (SARL x y) 18980 for { 18981 x := v_0 18982 if v_1.Op != OpAMD64ADDLconst { 18983 break 18984 } 18985 c := auxIntToInt32(v_1.AuxInt) 18986 y := v_1.Args[0] 18987 if !(c&31 == 0) { 18988 break 18989 } 18990 v.reset(OpAMD64SARL) 18991 v.AddArg2(x, y) 18992 return true 18993 } 18994 // match: (SARL x (NEGL <t> (ADDLconst [c] y))) 18995 // cond: c & 31 == 0 18996 // result: (SARL x (NEGL <t> y)) 18997 for { 18998 x := v_0 18999 if v_1.Op != OpAMD64NEGL { 19000 break 19001 } 19002 t := v_1.Type 19003 v_1_0 := v_1.Args[0] 19004 if v_1_0.Op != OpAMD64ADDLconst { 19005 break 19006 } 19007 c := auxIntToInt32(v_1_0.AuxInt) 19008 y := v_1_0.Args[0] 19009 if !(c&31 == 0) { 19010 break 19011 } 19012 v.reset(OpAMD64SARL) 19013 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 19014 v0.AddArg(y) 19015 v.AddArg2(x, v0) 19016 return true 19017 } 19018 // match: (SARL x (ANDLconst [c] y)) 19019 // cond: c & 31 == 31 19020 // result: (SARL x y) 19021 for { 19022 x := v_0 19023 if v_1.Op != OpAMD64ANDLconst { 19024 break 19025 } 19026 c := auxIntToInt32(v_1.AuxInt) 19027 y := v_1.Args[0] 19028 if !(c&31 == 31) { 19029 break 19030 } 19031 v.reset(OpAMD64SARL) 19032 v.AddArg2(x, y) 19033 return true 19034 } 19035 // match: (SARL x (NEGL <t> (ANDLconst [c] y))) 19036 // cond: c & 31 == 31 19037 // result: (SARL x (NEGL <t> y)) 19038 for { 19039 x := v_0 19040 if v_1.Op != OpAMD64NEGL { 19041 break 19042 } 19043 t := v_1.Type 19044 v_1_0 := v_1.Args[0] 19045 if v_1_0.Op != OpAMD64ANDLconst { 19046 break 19047 } 19048 c := auxIntToInt32(v_1_0.AuxInt) 19049 y := v_1_0.Args[0] 19050 if !(c&31 == 31) { 19051 break 19052 } 19053 v.reset(OpAMD64SARL) 19054 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 19055 v0.AddArg(y) 19056 v.AddArg2(x, v0) 19057 return true 19058 } 19059 // match: (SARL l:(MOVLload [off] {sym} ptr mem) x) 19060 // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) 19061 // result: (SARXLload [off] {sym} ptr x mem) 19062 for { 19063 l := v_0 19064 if l.Op != OpAMD64MOVLload { 19065 break 19066 } 19067 off := auxIntToInt32(l.AuxInt) 19068 sym := auxToSym(l.Aux) 19069 mem := l.Args[1] 19070 ptr := l.Args[0] 19071 x := v_1 19072 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { 19073 break 19074 } 19075 v.reset(OpAMD64SARXLload) 19076 v.AuxInt = int32ToAuxInt(off) 19077 v.Aux = symToAux(sym) 19078 v.AddArg3(ptr, x, mem) 19079 return true 19080 } 19081 return false 19082 } 19083 func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool { 19084 v_0 := v.Args[0] 19085 // match: (SARLconst x [0]) 19086 // result: x 19087 for { 19088 if auxIntToInt8(v.AuxInt) != 0 { 19089 
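// The SARL/SARQ rules here lean on the hardware behavior that 32- and 64-bit
// shifts use only the low 5 or 6 bits of the count register: an explicit mask
// that already preserves those bits (c&31 == 31, c&63 == 63 in the AND forms)
// or an added multiple of the width (c&31 == 0, c&63 == 0 in the ADD forms)
// is redundant and is peeled off, including underneath the NEG that bounded
// right-shift lowering inserts. So a shift written as
//
//	func sar32(x int32, s uint) int32 {
//		return x >> (s & 31) // the ANDLconst [31] is dropped; SARL masks anyway
//	}
//
// compiles to a bare SARL (a sketch). With buildcfg.GOAMD64 >= 3 a loaded
// operand can also fold into SARXL/SARXQ, the BMI2 shifts that take the count
// in an arbitrary register.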
break 19090 } 19091 x := v_0 19092 v.copyOf(x) 19093 return true 19094 } 19095 // match: (SARLconst [c] (MOVQconst [d])) 19096 // result: (MOVQconst [int64(int32(d))>>uint64(c)]) 19097 for { 19098 c := auxIntToInt8(v.AuxInt) 19099 if v_0.Op != OpAMD64MOVQconst { 19100 break 19101 } 19102 d := auxIntToInt64(v_0.AuxInt) 19103 v.reset(OpAMD64MOVQconst) 19104 v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c)) 19105 return true 19106 } 19107 return false 19108 } 19109 func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { 19110 v_1 := v.Args[1] 19111 v_0 := v.Args[0] 19112 b := v.Block 19113 // match: (SARQ x (MOVQconst [c])) 19114 // result: (SARQconst [int8(c&63)] x) 19115 for { 19116 x := v_0 19117 if v_1.Op != OpAMD64MOVQconst { 19118 break 19119 } 19120 c := auxIntToInt64(v_1.AuxInt) 19121 v.reset(OpAMD64SARQconst) 19122 v.AuxInt = int8ToAuxInt(int8(c & 63)) 19123 v.AddArg(x) 19124 return true 19125 } 19126 // match: (SARQ x (MOVLconst [c])) 19127 // result: (SARQconst [int8(c&63)] x) 19128 for { 19129 x := v_0 19130 if v_1.Op != OpAMD64MOVLconst { 19131 break 19132 } 19133 c := auxIntToInt32(v_1.AuxInt) 19134 v.reset(OpAMD64SARQconst) 19135 v.AuxInt = int8ToAuxInt(int8(c & 63)) 19136 v.AddArg(x) 19137 return true 19138 } 19139 // match: (SARQ x (ADDQconst [c] y)) 19140 // cond: c & 63 == 0 19141 // result: (SARQ x y) 19142 for { 19143 x := v_0 19144 if v_1.Op != OpAMD64ADDQconst { 19145 break 19146 } 19147 c := auxIntToInt32(v_1.AuxInt) 19148 y := v_1.Args[0] 19149 if !(c&63 == 0) { 19150 break 19151 } 19152 v.reset(OpAMD64SARQ) 19153 v.AddArg2(x, y) 19154 return true 19155 } 19156 // match: (SARQ x (NEGQ <t> (ADDQconst [c] y))) 19157 // cond: c & 63 == 0 19158 // result: (SARQ x (NEGQ <t> y)) 19159 for { 19160 x := v_0 19161 if v_1.Op != OpAMD64NEGQ { 19162 break 19163 } 19164 t := v_1.Type 19165 v_1_0 := v_1.Args[0] 19166 if v_1_0.Op != OpAMD64ADDQconst { 19167 break 19168 } 19169 c := auxIntToInt32(v_1_0.AuxInt) 19170 y := v_1_0.Args[0] 19171 if !(c&63 == 0) { 19172 break 19173 } 19174 v.reset(OpAMD64SARQ) 19175 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 19176 v0.AddArg(y) 19177 v.AddArg2(x, v0) 19178 return true 19179 } 19180 // match: (SARQ x (ANDQconst [c] y)) 19181 // cond: c & 63 == 63 19182 // result: (SARQ x y) 19183 for { 19184 x := v_0 19185 if v_1.Op != OpAMD64ANDQconst { 19186 break 19187 } 19188 c := auxIntToInt32(v_1.AuxInt) 19189 y := v_1.Args[0] 19190 if !(c&63 == 63) { 19191 break 19192 } 19193 v.reset(OpAMD64SARQ) 19194 v.AddArg2(x, y) 19195 return true 19196 } 19197 // match: (SARQ x (NEGQ <t> (ANDQconst [c] y))) 19198 // cond: c & 63 == 63 19199 // result: (SARQ x (NEGQ <t> y)) 19200 for { 19201 x := v_0 19202 if v_1.Op != OpAMD64NEGQ { 19203 break 19204 } 19205 t := v_1.Type 19206 v_1_0 := v_1.Args[0] 19207 if v_1_0.Op != OpAMD64ANDQconst { 19208 break 19209 } 19210 c := auxIntToInt32(v_1_0.AuxInt) 19211 y := v_1_0.Args[0] 19212 if !(c&63 == 63) { 19213 break 19214 } 19215 v.reset(OpAMD64SARQ) 19216 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 19217 v0.AddArg(y) 19218 v.AddArg2(x, v0) 19219 return true 19220 } 19221 // match: (SARQ x (ADDLconst [c] y)) 19222 // cond: c & 63 == 0 19223 // result: (SARQ x y) 19224 for { 19225 x := v_0 19226 if v_1.Op != OpAMD64ADDLconst { 19227 break 19228 } 19229 c := auxIntToInt32(v_1.AuxInt) 19230 y := v_1.Args[0] 19231 if !(c&63 == 0) { 19232 break 19233 } 19234 v.reset(OpAMD64SARQ) 19235 v.AddArg2(x, y) 19236 return true 19237 } 19238 // match: (SARQ x (NEGL <t> (ADDLconst [c] y))) 19239 // cond: c & 63 == 0 19240 // result: (SARQ x 
(NEGL <t> y)) 19241 for { 19242 x := v_0 19243 if v_1.Op != OpAMD64NEGL { 19244 break 19245 } 19246 t := v_1.Type 19247 v_1_0 := v_1.Args[0] 19248 if v_1_0.Op != OpAMD64ADDLconst { 19249 break 19250 } 19251 c := auxIntToInt32(v_1_0.AuxInt) 19252 y := v_1_0.Args[0] 19253 if !(c&63 == 0) { 19254 break 19255 } 19256 v.reset(OpAMD64SARQ) 19257 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 19258 v0.AddArg(y) 19259 v.AddArg2(x, v0) 19260 return true 19261 } 19262 // match: (SARQ x (ANDLconst [c] y)) 19263 // cond: c & 63 == 63 19264 // result: (SARQ x y) 19265 for { 19266 x := v_0 19267 if v_1.Op != OpAMD64ANDLconst { 19268 break 19269 } 19270 c := auxIntToInt32(v_1.AuxInt) 19271 y := v_1.Args[0] 19272 if !(c&63 == 63) { 19273 break 19274 } 19275 v.reset(OpAMD64SARQ) 19276 v.AddArg2(x, y) 19277 return true 19278 } 19279 // match: (SARQ x (NEGL <t> (ANDLconst [c] y))) 19280 // cond: c & 63 == 63 19281 // result: (SARQ x (NEGL <t> y)) 19282 for { 19283 x := v_0 19284 if v_1.Op != OpAMD64NEGL { 19285 break 19286 } 19287 t := v_1.Type 19288 v_1_0 := v_1.Args[0] 19289 if v_1_0.Op != OpAMD64ANDLconst { 19290 break 19291 } 19292 c := auxIntToInt32(v_1_0.AuxInt) 19293 y := v_1_0.Args[0] 19294 if !(c&63 == 63) { 19295 break 19296 } 19297 v.reset(OpAMD64SARQ) 19298 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 19299 v0.AddArg(y) 19300 v.AddArg2(x, v0) 19301 return true 19302 } 19303 // match: (SARQ l:(MOVQload [off] {sym} ptr mem) x) 19304 // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) 19305 // result: (SARXQload [off] {sym} ptr x mem) 19306 for { 19307 l := v_0 19308 if l.Op != OpAMD64MOVQload { 19309 break 19310 } 19311 off := auxIntToInt32(l.AuxInt) 19312 sym := auxToSym(l.Aux) 19313 mem := l.Args[1] 19314 ptr := l.Args[0] 19315 x := v_1 19316 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { 19317 break 19318 } 19319 v.reset(OpAMD64SARXQload) 19320 v.AuxInt = int32ToAuxInt(off) 19321 v.Aux = symToAux(sym) 19322 v.AddArg3(ptr, x, mem) 19323 return true 19324 } 19325 return false 19326 } 19327 func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool { 19328 v_0 := v.Args[0] 19329 // match: (SARQconst x [0]) 19330 // result: x 19331 for { 19332 if auxIntToInt8(v.AuxInt) != 0 { 19333 break 19334 } 19335 x := v_0 19336 v.copyOf(x) 19337 return true 19338 } 19339 // match: (SARQconst [c] (MOVQconst [d])) 19340 // result: (MOVQconst [d>>uint64(c)]) 19341 for { 19342 c := auxIntToInt8(v.AuxInt) 19343 if v_0.Op != OpAMD64MOVQconst { 19344 break 19345 } 19346 d := auxIntToInt64(v_0.AuxInt) 19347 v.reset(OpAMD64MOVQconst) 19348 v.AuxInt = int64ToAuxInt(d >> uint64(c)) 19349 return true 19350 } 19351 return false 19352 } 19353 func rewriteValueAMD64_OpAMD64SARW(v *Value) bool { 19354 v_1 := v.Args[1] 19355 v_0 := v.Args[0] 19356 // match: (SARW x (MOVQconst [c])) 19357 // result: (SARWconst [int8(min(int64(c)&31,15))] x) 19358 for { 19359 x := v_0 19360 if v_1.Op != OpAMD64MOVQconst { 19361 break 19362 } 19363 c := auxIntToInt64(v_1.AuxInt) 19364 v.reset(OpAMD64SARWconst) 19365 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15))) 19366 v.AddArg(x) 19367 return true 19368 } 19369 // match: (SARW x (MOVLconst [c])) 19370 // result: (SARWconst [int8(min(int64(c)&31,15))] x) 19371 for { 19372 x := v_0 19373 if v_1.Op != OpAMD64MOVLconst { 19374 break 19375 } 19376 c := auxIntToInt32(v_1.AuxInt) 19377 v.reset(OpAMD64SARWconst) 19378 v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15))) 19379 v.AddArg(x) 19380 return true 19381 } 19382 return false 19383 } 19384 func 
rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool { 19385 v_0 := v.Args[0] 19386 // match: (SARWconst x [0]) 19387 // result: x 19388 for { 19389 if auxIntToInt8(v.AuxInt) != 0 { 19390 break 19391 } 19392 x := v_0 19393 v.copyOf(x) 19394 return true 19395 } 19396 // match: (SARWconst [c] (MOVQconst [d])) 19397 // result: (MOVQconst [int64(int16(d))>>uint64(c)]) 19398 for { 19399 c := auxIntToInt8(v.AuxInt) 19400 if v_0.Op != OpAMD64MOVQconst { 19401 break 19402 } 19403 d := auxIntToInt64(v_0.AuxInt) 19404 v.reset(OpAMD64MOVQconst) 19405 v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c)) 19406 return true 19407 } 19408 return false 19409 } 19410 func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool { 19411 v_2 := v.Args[2] 19412 v_1 := v.Args[1] 19413 v_0 := v.Args[0] 19414 b := v.Block 19415 typ := &b.Func.Config.Types 19416 // match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem) 19417 // result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem)) 19418 for { 19419 off := auxIntToInt32(v.AuxInt) 19420 sym := auxToSym(v.Aux) 19421 ptr := v_0 19422 if v_1.Op != OpAMD64MOVLconst { 19423 break 19424 } 19425 c := auxIntToInt32(v_1.AuxInt) 19426 mem := v_2 19427 v.reset(OpAMD64SARLconst) 19428 v.AuxInt = int8ToAuxInt(int8(c & 31)) 19429 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 19430 v0.AuxInt = int32ToAuxInt(off) 19431 v0.Aux = symToAux(sym) 19432 v0.AddArg2(ptr, mem) 19433 v.AddArg(v0) 19434 return true 19435 } 19436 return false 19437 } 19438 func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool { 19439 v_2 := v.Args[2] 19440 v_1 := v.Args[1] 19441 v_0 := v.Args[0] 19442 b := v.Block 19443 typ := &b.Func.Config.Types 19444 // match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem) 19445 // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) 19446 for { 19447 off := auxIntToInt32(v.AuxInt) 19448 sym := auxToSym(v.Aux) 19449 ptr := v_0 19450 if v_1.Op != OpAMD64MOVQconst { 19451 break 19452 } 19453 c := auxIntToInt64(v_1.AuxInt) 19454 mem := v_2 19455 v.reset(OpAMD64SARQconst) 19456 v.AuxInt = int8ToAuxInt(int8(c & 63)) 19457 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 19458 v0.AuxInt = int32ToAuxInt(off) 19459 v0.Aux = symToAux(sym) 19460 v0.AddArg2(ptr, mem) 19461 v.AddArg(v0) 19462 return true 19463 } 19464 // match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem) 19465 // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) 19466 for { 19467 off := auxIntToInt32(v.AuxInt) 19468 sym := auxToSym(v.Aux) 19469 ptr := v_0 19470 if v_1.Op != OpAMD64MOVLconst { 19471 break 19472 } 19473 c := auxIntToInt32(v_1.AuxInt) 19474 mem := v_2 19475 v.reset(OpAMD64SARQconst) 19476 v.AuxInt = int8ToAuxInt(int8(c & 63)) 19477 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 19478 v0.AuxInt = int32ToAuxInt(off) 19479 v0.Aux = symToAux(sym) 19480 v0.AddArg2(ptr, mem) 19481 v.AddArg(v0) 19482 return true 19483 } 19484 return false 19485 } 19486 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool { 19487 v_0 := v.Args[0] 19488 // match: (SBBLcarrymask (FlagEQ)) 19489 // result: (MOVLconst [0]) 19490 for { 19491 if v_0.Op != OpAMD64FlagEQ { 19492 break 19493 } 19494 v.reset(OpAMD64MOVLconst) 19495 v.AuxInt = int32ToAuxInt(0) 19496 return true 19497 } 19498 // match: (SBBLcarrymask (FlagLT_ULT)) 19499 // result: (MOVLconst [-1]) 19500 for { 19501 if v_0.Op != OpAMD64FlagLT_ULT { 19502 break 19503 } 19504 v.reset(OpAMD64MOVLconst) 19505 v.AuxInt = int32ToAuxInt(-1) 19506 return true 19507 } 19508 // match: (SBBLcarrymask (FlagLT_UGT)) 19509 // 
result: (MOVLconst [0]) 19510 for { 19511 if v_0.Op != OpAMD64FlagLT_UGT { 19512 break 19513 } 19514 v.reset(OpAMD64MOVLconst) 19515 v.AuxInt = int32ToAuxInt(0) 19516 return true 19517 } 19518 // match: (SBBLcarrymask (FlagGT_ULT)) 19519 // result: (MOVLconst [-1]) 19520 for { 19521 if v_0.Op != OpAMD64FlagGT_ULT { 19522 break 19523 } 19524 v.reset(OpAMD64MOVLconst) 19525 v.AuxInt = int32ToAuxInt(-1) 19526 return true 19527 } 19528 // match: (SBBLcarrymask (FlagGT_UGT)) 19529 // result: (MOVLconst [0]) 19530 for { 19531 if v_0.Op != OpAMD64FlagGT_UGT { 19532 break 19533 } 19534 v.reset(OpAMD64MOVLconst) 19535 v.AuxInt = int32ToAuxInt(0) 19536 return true 19537 } 19538 return false 19539 } 19540 func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool { 19541 v_2 := v.Args[2] 19542 v_1 := v.Args[1] 19543 v_0 := v.Args[0] 19544 // match: (SBBQ x (MOVQconst [c]) borrow) 19545 // cond: is32Bit(c) 19546 // result: (SBBQconst x [int32(c)] borrow) 19547 for { 19548 x := v_0 19549 if v_1.Op != OpAMD64MOVQconst { 19550 break 19551 } 19552 c := auxIntToInt64(v_1.AuxInt) 19553 borrow := v_2 19554 if !(is32Bit(c)) { 19555 break 19556 } 19557 v.reset(OpAMD64SBBQconst) 19558 v.AuxInt = int32ToAuxInt(int32(c)) 19559 v.AddArg2(x, borrow) 19560 return true 19561 } 19562 // match: (SBBQ x y (FlagEQ)) 19563 // result: (SUBQborrow x y) 19564 for { 19565 x := v_0 19566 y := v_1 19567 if v_2.Op != OpAMD64FlagEQ { 19568 break 19569 } 19570 v.reset(OpAMD64SUBQborrow) 19571 v.AddArg2(x, y) 19572 return true 19573 } 19574 return false 19575 } 19576 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool { 19577 v_0 := v.Args[0] 19578 // match: (SBBQcarrymask (FlagEQ)) 19579 // result: (MOVQconst [0]) 19580 for { 19581 if v_0.Op != OpAMD64FlagEQ { 19582 break 19583 } 19584 v.reset(OpAMD64MOVQconst) 19585 v.AuxInt = int64ToAuxInt(0) 19586 return true 19587 } 19588 // match: (SBBQcarrymask (FlagLT_ULT)) 19589 // result: (MOVQconst [-1]) 19590 for { 19591 if v_0.Op != OpAMD64FlagLT_ULT { 19592 break 19593 } 19594 v.reset(OpAMD64MOVQconst) 19595 v.AuxInt = int64ToAuxInt(-1) 19596 return true 19597 } 19598 // match: (SBBQcarrymask (FlagLT_UGT)) 19599 // result: (MOVQconst [0]) 19600 for { 19601 if v_0.Op != OpAMD64FlagLT_UGT { 19602 break 19603 } 19604 v.reset(OpAMD64MOVQconst) 19605 v.AuxInt = int64ToAuxInt(0) 19606 return true 19607 } 19608 // match: (SBBQcarrymask (FlagGT_ULT)) 19609 // result: (MOVQconst [-1]) 19610 for { 19611 if v_0.Op != OpAMD64FlagGT_ULT { 19612 break 19613 } 19614 v.reset(OpAMD64MOVQconst) 19615 v.AuxInt = int64ToAuxInt(-1) 19616 return true 19617 } 19618 // match: (SBBQcarrymask (FlagGT_UGT)) 19619 // result: (MOVQconst [0]) 19620 for { 19621 if v_0.Op != OpAMD64FlagGT_UGT { 19622 break 19623 } 19624 v.reset(OpAMD64MOVQconst) 19625 v.AuxInt = int64ToAuxInt(0) 19626 return true 19627 } 19628 return false 19629 } 19630 func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool { 19631 v_1 := v.Args[1] 19632 v_0 := v.Args[0] 19633 // match: (SBBQconst x [c] (FlagEQ)) 19634 // result: (SUBQconstborrow x [c]) 19635 for { 19636 c := auxIntToInt32(v.AuxInt) 19637 x := v_0 19638 if v_1.Op != OpAMD64FlagEQ { 19639 break 19640 } 19641 v.reset(OpAMD64SUBQconstborrow) 19642 v.AuxInt = int32ToAuxInt(c) 19643 v.AddArg(x) 19644 return true 19645 } 19646 return false 19647 } 19648 func rewriteValueAMD64_OpAMD64SETA(v *Value) bool { 19649 v_0 := v.Args[0] 19650 // match: (SETA (InvertFlags x)) 19651 // result: (SETB x) 19652 for { 19653 if v_0.Op != OpAMD64InvertFlags { 19654 break 19655 } 19656 x := v_0.Args[0] 19657 
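// SBBLcarrymask/SBBQcarrymask materialize 0 or all-ones from the carry flag,
// so a statically known flags input folds the mask to a constant: the Flag*
// pseudo-values encode the complete signed+unsigned outcome of a comparison,
// and the unsigned-less-than states (carry set) give -1 while FlagEQ and the
// unsigned-greater states give 0. The SETcc rules here and below fold the
// same flag constants to 0/1, and additionally understand InvertFlags, which
// stands for the flags of the same comparison with its operands swapped;
// swapping operands turns "strictly above" into "strictly below",
// schematically:
//
//	SETA (InvertFlags (CMPQ a b))  // "b above a" (unsigned)
//	  == SETB (CMPQ a b)           // "a below b"
//
// (rule-style notation, not generated code). Relatedly, SETAE of (TESTx x x)
// is constant true: TEST always clears the carry flag, and AE tests exactly
// "carry clear".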
func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETA (InvertFlags x))
	// result: (SETB x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETAE (TESTQ x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTL x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTW x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTB x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (InvertFlags x))
	// result: (SETBE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
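// NOTE: editorial annotation, not generator output. SETccstore ops fuse a
// SETcc with a one-byte store. Besides mirroring SETAE's flag rewrites,
// the ADDQconst and LEAQ rules below fold an address computation into the
// store's displacement. The guard is32Bit(int64(off1)+int64(off2)) does
// the addition in 64 bits first, so the fold is skipped, rather than
// silently wrapping, whenever the combined offset would leave the signed
// 32-bit range that an amd64 addressing mode can encode.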
func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
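// NOTE: editorial annotation, not generator output. When the flag operand
// of a SETccstore is itself a known Flag value, the whole op collapses to
// a plain MOVBstore of 0 or 1. The constant is built as
// (MOVLconst <typ.UInt8> [0 or 1]): a 32-bit constant op carrying a UInt8
// type, so the byte store consumes only its low byte.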
func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
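// NOTE: editorial annotation, not generator output. (TESTx x x) ANDs a
// value with itself, which always clears the carry flag, so SETB (carry
// set) below is constant false, just as the SETAE forms above were
// constant true. (SETB (BTconst [0] x)) reads bit 0 of x into the carry
// flag and materializes it, which is exactly x&1, hence the rewrite to
// ANDconst [1].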
func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETB (TESTQ x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTL x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTW x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTB x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (BTLconst [0] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (BTQconst [0] x))
	// result: (ANDQconst [1] x)
	for {
		if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (InvertFlags x))
	// result: (SETA x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETBE (InvertFlags x))
	// result: (SETAE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
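// NOTE: editorial annotation, not generator output. The SETEQ rules below
// recognize single-bit tests and convert them to BT (bit test): a TEST
// against a power-of-two mask, against 1<<x built by a shift, or against a
// value whose single interesting bit was isolated by a shift pair. BT
// copies the selected bit into the carry flag, so "equal to zero" becomes
// SETAE (carry clear). log32/log64 recover the bit index from the mask.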
func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETAE (BTL x y))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETAE (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAE (BTLconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAE (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETNE (CMPLconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETNE (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (InvertFlags x))
	// result: (SETEQ x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
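// NOTE: editorial annotation, not generator output. SETEQstore repeats the
// SETEQ bit-test recognition with the boolean stored straight to memory,
// so each rule targets the SETAEstore/SETNEstore forms instead of
// SETAE/SETNE, threading the ptr and mem operands through unchanged.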
func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETEQstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
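// NOTE: editorial annotation, not generator output. SETG and SETGE below
// are the signed conditions: they read only the signed half of each Flag
// pseudo-op and ignore the unsigned half, so SETG is 1 for both FlagGT_ULT
// and FlagGT_UGT, and 0 for the FlagLT_* and FlagEQ cases.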
func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETG (InvertFlags x))
	// result: (SETL x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETGE (InvertFlags x))
	// result: (SETLE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
21843 break 21844 } 21845 v.reset(OpAMD64SETGEstore) 21846 v.AuxInt = int32ToAuxInt(off1 + off2) 21847 v.Aux = symToAux(sym) 21848 v.AddArg3(base, val, mem) 21849 return true 21850 } 21851 // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 21852 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 21853 // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 21854 for { 21855 off1 := auxIntToInt32(v.AuxInt) 21856 sym1 := auxToSym(v.Aux) 21857 if v_0.Op != OpAMD64LEAQ { 21858 break 21859 } 21860 off2 := auxIntToInt32(v_0.AuxInt) 21861 sym2 := auxToSym(v_0.Aux) 21862 base := v_0.Args[0] 21863 val := v_1 21864 mem := v_2 21865 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 21866 break 21867 } 21868 v.reset(OpAMD64SETGEstore) 21869 v.AuxInt = int32ToAuxInt(off1 + off2) 21870 v.Aux = symToAux(mergeSym(sym1, sym2)) 21871 v.AddArg3(base, val, mem) 21872 return true 21873 } 21874 // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem) 21875 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 21876 for { 21877 off := auxIntToInt32(v.AuxInt) 21878 sym := auxToSym(v.Aux) 21879 ptr := v_0 21880 if v_1.Op != OpAMD64FlagEQ { 21881 break 21882 } 21883 mem := v_2 21884 v.reset(OpAMD64MOVBstore) 21885 v.AuxInt = int32ToAuxInt(off) 21886 v.Aux = symToAux(sym) 21887 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 21888 v0.AuxInt = int32ToAuxInt(1) 21889 v.AddArg3(ptr, v0, mem) 21890 return true 21891 } 21892 // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) 21893 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 21894 for { 21895 off := auxIntToInt32(v.AuxInt) 21896 sym := auxToSym(v.Aux) 21897 ptr := v_0 21898 if v_1.Op != OpAMD64FlagLT_ULT { 21899 break 21900 } 21901 mem := v_2 21902 v.reset(OpAMD64MOVBstore) 21903 v.AuxInt = int32ToAuxInt(off) 21904 v.Aux = symToAux(sym) 21905 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 21906 v0.AuxInt = int32ToAuxInt(0) 21907 v.AddArg3(ptr, v0, mem) 21908 return true 21909 } 21910 // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) 21911 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 21912 for { 21913 off := auxIntToInt32(v.AuxInt) 21914 sym := auxToSym(v.Aux) 21915 ptr := v_0 21916 if v_1.Op != OpAMD64FlagLT_UGT { 21917 break 21918 } 21919 mem := v_2 21920 v.reset(OpAMD64MOVBstore) 21921 v.AuxInt = int32ToAuxInt(off) 21922 v.Aux = symToAux(sym) 21923 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 21924 v0.AuxInt = int32ToAuxInt(0) 21925 v.AddArg3(ptr, v0, mem) 21926 return true 21927 } 21928 // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) 21929 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 21930 for { 21931 off := auxIntToInt32(v.AuxInt) 21932 sym := auxToSym(v.Aux) 21933 ptr := v_0 21934 if v_1.Op != OpAMD64FlagGT_ULT { 21935 break 21936 } 21937 mem := v_2 21938 v.reset(OpAMD64MOVBstore) 21939 v.AuxInt = int32ToAuxInt(off) 21940 v.Aux = symToAux(sym) 21941 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 21942 v0.AuxInt = int32ToAuxInt(1) 21943 v.AddArg3(ptr, v0, mem) 21944 return true 21945 } 21946 // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) 21947 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 21948 for { 21949 off := auxIntToInt32(v.AuxInt) 21950 sym := auxToSym(v.Aux) 21951 ptr := v_0 21952 if v_1.Op != OpAMD64FlagGT_UGT { 21953 break 21954 } 21955 mem := v_2 21956 v.reset(OpAMD64MOVBstore) 21957 v.AuxInt = int32ToAuxInt(off) 21958 
v.Aux = symToAux(sym) 21959 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 21960 v0.AuxInt = int32ToAuxInt(1) 21961 v.AddArg3(ptr, v0, mem) 21962 return true 21963 } 21964 return false 21965 } 21966 func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { 21967 v_2 := v.Args[2] 21968 v_1 := v.Args[1] 21969 v_0 := v.Args[0] 21970 b := v.Block 21971 typ := &b.Func.Config.Types 21972 // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem) 21973 // result: (SETLstore [off] {sym} ptr x mem) 21974 for { 21975 off := auxIntToInt32(v.AuxInt) 21976 sym := auxToSym(v.Aux) 21977 ptr := v_0 21978 if v_1.Op != OpAMD64InvertFlags { 21979 break 21980 } 21981 x := v_1.Args[0] 21982 mem := v_2 21983 v.reset(OpAMD64SETLstore) 21984 v.AuxInt = int32ToAuxInt(off) 21985 v.Aux = symToAux(sym) 21986 v.AddArg3(ptr, x, mem) 21987 return true 21988 } 21989 // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem) 21990 // cond: is32Bit(int64(off1)+int64(off2)) 21991 // result: (SETGstore [off1+off2] {sym} base val mem) 21992 for { 21993 off1 := auxIntToInt32(v.AuxInt) 21994 sym := auxToSym(v.Aux) 21995 if v_0.Op != OpAMD64ADDQconst { 21996 break 21997 } 21998 off2 := auxIntToInt32(v_0.AuxInt) 21999 base := v_0.Args[0] 22000 val := v_1 22001 mem := v_2 22002 if !(is32Bit(int64(off1) + int64(off2))) { 22003 break 22004 } 22005 v.reset(OpAMD64SETGstore) 22006 v.AuxInt = int32ToAuxInt(off1 + off2) 22007 v.Aux = symToAux(sym) 22008 v.AddArg3(base, val, mem) 22009 return true 22010 } 22011 // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 22012 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 22013 // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 22014 for { 22015 off1 := auxIntToInt32(v.AuxInt) 22016 sym1 := auxToSym(v.Aux) 22017 if v_0.Op != OpAMD64LEAQ { 22018 break 22019 } 22020 off2 := auxIntToInt32(v_0.AuxInt) 22021 sym2 := auxToSym(v_0.Aux) 22022 base := v_0.Args[0] 22023 val := v_1 22024 mem := v_2 22025 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 22026 break 22027 } 22028 v.reset(OpAMD64SETGstore) 22029 v.AuxInt = int32ToAuxInt(off1 + off2) 22030 v.Aux = symToAux(mergeSym(sym1, sym2)) 22031 v.AddArg3(base, val, mem) 22032 return true 22033 } 22034 // match: (SETGstore [off] {sym} ptr (FlagEQ) mem) 22035 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 22036 for { 22037 off := auxIntToInt32(v.AuxInt) 22038 sym := auxToSym(v.Aux) 22039 ptr := v_0 22040 if v_1.Op != OpAMD64FlagEQ { 22041 break 22042 } 22043 mem := v_2 22044 v.reset(OpAMD64MOVBstore) 22045 v.AuxInt = int32ToAuxInt(off) 22046 v.Aux = symToAux(sym) 22047 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22048 v0.AuxInt = int32ToAuxInt(0) 22049 v.AddArg3(ptr, v0, mem) 22050 return true 22051 } 22052 // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem) 22053 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 22054 for { 22055 off := auxIntToInt32(v.AuxInt) 22056 sym := auxToSym(v.Aux) 22057 ptr := v_0 22058 if v_1.Op != OpAMD64FlagLT_ULT { 22059 break 22060 } 22061 mem := v_2 22062 v.reset(OpAMD64MOVBstore) 22063 v.AuxInt = int32ToAuxInt(off) 22064 v.Aux = symToAux(sym) 22065 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22066 v0.AuxInt = int32ToAuxInt(0) 22067 v.AddArg3(ptr, v0, mem) 22068 return true 22069 } 22070 // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem) 22071 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 22072 for { 22073 off := auxIntToInt32(v.AuxInt) 
22074 sym := auxToSym(v.Aux) 22075 ptr := v_0 22076 if v_1.Op != OpAMD64FlagLT_UGT { 22077 break 22078 } 22079 mem := v_2 22080 v.reset(OpAMD64MOVBstore) 22081 v.AuxInt = int32ToAuxInt(off) 22082 v.Aux = symToAux(sym) 22083 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22084 v0.AuxInt = int32ToAuxInt(0) 22085 v.AddArg3(ptr, v0, mem) 22086 return true 22087 } 22088 // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem) 22089 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 22090 for { 22091 off := auxIntToInt32(v.AuxInt) 22092 sym := auxToSym(v.Aux) 22093 ptr := v_0 22094 if v_1.Op != OpAMD64FlagGT_ULT { 22095 break 22096 } 22097 mem := v_2 22098 v.reset(OpAMD64MOVBstore) 22099 v.AuxInt = int32ToAuxInt(off) 22100 v.Aux = symToAux(sym) 22101 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22102 v0.AuxInt = int32ToAuxInt(1) 22103 v.AddArg3(ptr, v0, mem) 22104 return true 22105 } 22106 // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem) 22107 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 22108 for { 22109 off := auxIntToInt32(v.AuxInt) 22110 sym := auxToSym(v.Aux) 22111 ptr := v_0 22112 if v_1.Op != OpAMD64FlagGT_UGT { 22113 break 22114 } 22115 mem := v_2 22116 v.reset(OpAMD64MOVBstore) 22117 v.AuxInt = int32ToAuxInt(off) 22118 v.Aux = symToAux(sym) 22119 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22120 v0.AuxInt = int32ToAuxInt(1) 22121 v.AddArg3(ptr, v0, mem) 22122 return true 22123 } 22124 return false 22125 } 22126 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool { 22127 v_0 := v.Args[0] 22128 // match: (SETL (InvertFlags x)) 22129 // result: (SETG x) 22130 for { 22131 if v_0.Op != OpAMD64InvertFlags { 22132 break 22133 } 22134 x := v_0.Args[0] 22135 v.reset(OpAMD64SETG) 22136 v.AddArg(x) 22137 return true 22138 } 22139 // match: (SETL (FlagEQ)) 22140 // result: (MOVLconst [0]) 22141 for { 22142 if v_0.Op != OpAMD64FlagEQ { 22143 break 22144 } 22145 v.reset(OpAMD64MOVLconst) 22146 v.AuxInt = int32ToAuxInt(0) 22147 return true 22148 } 22149 // match: (SETL (FlagLT_ULT)) 22150 // result: (MOVLconst [1]) 22151 for { 22152 if v_0.Op != OpAMD64FlagLT_ULT { 22153 break 22154 } 22155 v.reset(OpAMD64MOVLconst) 22156 v.AuxInt = int32ToAuxInt(1) 22157 return true 22158 } 22159 // match: (SETL (FlagLT_UGT)) 22160 // result: (MOVLconst [1]) 22161 for { 22162 if v_0.Op != OpAMD64FlagLT_UGT { 22163 break 22164 } 22165 v.reset(OpAMD64MOVLconst) 22166 v.AuxInt = int32ToAuxInt(1) 22167 return true 22168 } 22169 // match: (SETL (FlagGT_ULT)) 22170 // result: (MOVLconst [0]) 22171 for { 22172 if v_0.Op != OpAMD64FlagGT_ULT { 22173 break 22174 } 22175 v.reset(OpAMD64MOVLconst) 22176 v.AuxInt = int32ToAuxInt(0) 22177 return true 22178 } 22179 // match: (SETL (FlagGT_UGT)) 22180 // result: (MOVLconst [0]) 22181 for { 22182 if v_0.Op != OpAMD64FlagGT_UGT { 22183 break 22184 } 22185 v.reset(OpAMD64MOVLconst) 22186 v.AuxInt = int32ToAuxInt(0) 22187 return true 22188 } 22189 return false 22190 } 22191 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool { 22192 v_0 := v.Args[0] 22193 // match: (SETLE (InvertFlags x)) 22194 // result: (SETGE x) 22195 for { 22196 if v_0.Op != OpAMD64InvertFlags { 22197 break 22198 } 22199 x := v_0.Args[0] 22200 v.reset(OpAMD64SETGE) 22201 v.AddArg(x) 22202 return true 22203 } 22204 // match: (SETLE (FlagEQ)) 22205 // result: (MOVLconst [1]) 22206 for { 22207 if v_0.Op != OpAMD64FlagEQ { 22208 break 22209 } 22210 v.reset(OpAMD64MOVLconst) 22211 v.AuxInt = int32ToAuxInt(1) 22212 return true 22213 } 22214 // 
match: (SETLE (FlagLT_ULT)) 22215 // result: (MOVLconst [1]) 22216 for { 22217 if v_0.Op != OpAMD64FlagLT_ULT { 22218 break 22219 } 22220 v.reset(OpAMD64MOVLconst) 22221 v.AuxInt = int32ToAuxInt(1) 22222 return true 22223 } 22224 // match: (SETLE (FlagLT_UGT)) 22225 // result: (MOVLconst [1]) 22226 for { 22227 if v_0.Op != OpAMD64FlagLT_UGT { 22228 break 22229 } 22230 v.reset(OpAMD64MOVLconst) 22231 v.AuxInt = int32ToAuxInt(1) 22232 return true 22233 } 22234 // match: (SETLE (FlagGT_ULT)) 22235 // result: (MOVLconst [0]) 22236 for { 22237 if v_0.Op != OpAMD64FlagGT_ULT { 22238 break 22239 } 22240 v.reset(OpAMD64MOVLconst) 22241 v.AuxInt = int32ToAuxInt(0) 22242 return true 22243 } 22244 // match: (SETLE (FlagGT_UGT)) 22245 // result: (MOVLconst [0]) 22246 for { 22247 if v_0.Op != OpAMD64FlagGT_UGT { 22248 break 22249 } 22250 v.reset(OpAMD64MOVLconst) 22251 v.AuxInt = int32ToAuxInt(0) 22252 return true 22253 } 22254 return false 22255 } 22256 func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { 22257 v_2 := v.Args[2] 22258 v_1 := v.Args[1] 22259 v_0 := v.Args[0] 22260 b := v.Block 22261 typ := &b.Func.Config.Types 22262 // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem) 22263 // result: (SETGEstore [off] {sym} ptr x mem) 22264 for { 22265 off := auxIntToInt32(v.AuxInt) 22266 sym := auxToSym(v.Aux) 22267 ptr := v_0 22268 if v_1.Op != OpAMD64InvertFlags { 22269 break 22270 } 22271 x := v_1.Args[0] 22272 mem := v_2 22273 v.reset(OpAMD64SETGEstore) 22274 v.AuxInt = int32ToAuxInt(off) 22275 v.Aux = symToAux(sym) 22276 v.AddArg3(ptr, x, mem) 22277 return true 22278 } 22279 // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem) 22280 // cond: is32Bit(int64(off1)+int64(off2)) 22281 // result: (SETLEstore [off1+off2] {sym} base val mem) 22282 for { 22283 off1 := auxIntToInt32(v.AuxInt) 22284 sym := auxToSym(v.Aux) 22285 if v_0.Op != OpAMD64ADDQconst { 22286 break 22287 } 22288 off2 := auxIntToInt32(v_0.AuxInt) 22289 base := v_0.Args[0] 22290 val := v_1 22291 mem := v_2 22292 if !(is32Bit(int64(off1) + int64(off2))) { 22293 break 22294 } 22295 v.reset(OpAMD64SETLEstore) 22296 v.AuxInt = int32ToAuxInt(off1 + off2) 22297 v.Aux = symToAux(sym) 22298 v.AddArg3(base, val, mem) 22299 return true 22300 } 22301 // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 22302 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 22303 // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 22304 for { 22305 off1 := auxIntToInt32(v.AuxInt) 22306 sym1 := auxToSym(v.Aux) 22307 if v_0.Op != OpAMD64LEAQ { 22308 break 22309 } 22310 off2 := auxIntToInt32(v_0.AuxInt) 22311 sym2 := auxToSym(v_0.Aux) 22312 base := v_0.Args[0] 22313 val := v_1 22314 mem := v_2 22315 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 22316 break 22317 } 22318 v.reset(OpAMD64SETLEstore) 22319 v.AuxInt = int32ToAuxInt(off1 + off2) 22320 v.Aux = symToAux(mergeSym(sym1, sym2)) 22321 v.AddArg3(base, val, mem) 22322 return true 22323 } 22324 // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem) 22325 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 22326 for { 22327 off := auxIntToInt32(v.AuxInt) 22328 sym := auxToSym(v.Aux) 22329 ptr := v_0 22330 if v_1.Op != OpAMD64FlagEQ { 22331 break 22332 } 22333 mem := v_2 22334 v.reset(OpAMD64MOVBstore) 22335 v.AuxInt = int32ToAuxInt(off) 22336 v.Aux = symToAux(sym) 22337 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22338 v0.AuxInt = int32ToAuxInt(1) 22339 v.AddArg3(ptr, v0, mem) 22340 
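// Editorial note (not generated): FlagEQ records a comparison whose outcome
// is already known at compile time; "less or equal" holds when the operands
// compare equal, so the conditional store collapses to writing the byte
// constant 1 unconditionally.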
return true 22341 } 22342 // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) 22343 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 22344 for { 22345 off := auxIntToInt32(v.AuxInt) 22346 sym := auxToSym(v.Aux) 22347 ptr := v_0 22348 if v_1.Op != OpAMD64FlagLT_ULT { 22349 break 22350 } 22351 mem := v_2 22352 v.reset(OpAMD64MOVBstore) 22353 v.AuxInt = int32ToAuxInt(off) 22354 v.Aux = symToAux(sym) 22355 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22356 v0.AuxInt = int32ToAuxInt(1) 22357 v.AddArg3(ptr, v0, mem) 22358 return true 22359 } 22360 // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) 22361 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 22362 for { 22363 off := auxIntToInt32(v.AuxInt) 22364 sym := auxToSym(v.Aux) 22365 ptr := v_0 22366 if v_1.Op != OpAMD64FlagLT_UGT { 22367 break 22368 } 22369 mem := v_2 22370 v.reset(OpAMD64MOVBstore) 22371 v.AuxInt = int32ToAuxInt(off) 22372 v.Aux = symToAux(sym) 22373 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22374 v0.AuxInt = int32ToAuxInt(1) 22375 v.AddArg3(ptr, v0, mem) 22376 return true 22377 } 22378 // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) 22379 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 22380 for { 22381 off := auxIntToInt32(v.AuxInt) 22382 sym := auxToSym(v.Aux) 22383 ptr := v_0 22384 if v_1.Op != OpAMD64FlagGT_ULT { 22385 break 22386 } 22387 mem := v_2 22388 v.reset(OpAMD64MOVBstore) 22389 v.AuxInt = int32ToAuxInt(off) 22390 v.Aux = symToAux(sym) 22391 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22392 v0.AuxInt = int32ToAuxInt(0) 22393 v.AddArg3(ptr, v0, mem) 22394 return true 22395 } 22396 // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) 22397 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 22398 for { 22399 off := auxIntToInt32(v.AuxInt) 22400 sym := auxToSym(v.Aux) 22401 ptr := v_0 22402 if v_1.Op != OpAMD64FlagGT_UGT { 22403 break 22404 } 22405 mem := v_2 22406 v.reset(OpAMD64MOVBstore) 22407 v.AuxInt = int32ToAuxInt(off) 22408 v.Aux = symToAux(sym) 22409 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22410 v0.AuxInt = int32ToAuxInt(0) 22411 v.AddArg3(ptr, v0, mem) 22412 return true 22413 } 22414 return false 22415 } 22416 func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { 22417 v_2 := v.Args[2] 22418 v_1 := v.Args[1] 22419 v_0 := v.Args[0] 22420 b := v.Block 22421 typ := &b.Func.Config.Types 22422 // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem) 22423 // result: (SETGstore [off] {sym} ptr x mem) 22424 for { 22425 off := auxIntToInt32(v.AuxInt) 22426 sym := auxToSym(v.Aux) 22427 ptr := v_0 22428 if v_1.Op != OpAMD64InvertFlags { 22429 break 22430 } 22431 x := v_1.Args[0] 22432 mem := v_2 22433 v.reset(OpAMD64SETGstore) 22434 v.AuxInt = int32ToAuxInt(off) 22435 v.Aux = symToAux(sym) 22436 v.AddArg3(ptr, x, mem) 22437 return true 22438 } 22439 // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem) 22440 // cond: is32Bit(int64(off1)+int64(off2)) 22441 // result: (SETLstore [off1+off2] {sym} base val mem) 22442 for { 22443 off1 := auxIntToInt32(v.AuxInt) 22444 sym := auxToSym(v.Aux) 22445 if v_0.Op != OpAMD64ADDQconst { 22446 break 22447 } 22448 off2 := auxIntToInt32(v_0.AuxInt) 22449 base := v_0.Args[0] 22450 val := v_1 22451 mem := v_2 22452 if !(is32Bit(int64(off1) + int64(off2))) { 22453 break 22454 } 22455 v.reset(OpAMD64SETLstore) 22456 v.AuxInt = int32ToAuxInt(off1 + off2) 22457 v.Aux = symToAux(sym) 22458 v.AddArg3(base, val, mem) 22459 
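// Editorial note (not generated): like the other SETccstore ops, this folds
// a constant pointer adjustment into the store's displacement; the LEAQ
// rule just below does the same and additionally merges the two symbolic
// bases with mergeSym when canMergeSym permits.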
return true 22460 } 22461 // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 22462 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 22463 // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 22464 for { 22465 off1 := auxIntToInt32(v.AuxInt) 22466 sym1 := auxToSym(v.Aux) 22467 if v_0.Op != OpAMD64LEAQ { 22468 break 22469 } 22470 off2 := auxIntToInt32(v_0.AuxInt) 22471 sym2 := auxToSym(v_0.Aux) 22472 base := v_0.Args[0] 22473 val := v_1 22474 mem := v_2 22475 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 22476 break 22477 } 22478 v.reset(OpAMD64SETLstore) 22479 v.AuxInt = int32ToAuxInt(off1 + off2) 22480 v.Aux = symToAux(mergeSym(sym1, sym2)) 22481 v.AddArg3(base, val, mem) 22482 return true 22483 } 22484 // match: (SETLstore [off] {sym} ptr (FlagEQ) mem) 22485 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 22486 for { 22487 off := auxIntToInt32(v.AuxInt) 22488 sym := auxToSym(v.Aux) 22489 ptr := v_0 22490 if v_1.Op != OpAMD64FlagEQ { 22491 break 22492 } 22493 mem := v_2 22494 v.reset(OpAMD64MOVBstore) 22495 v.AuxInt = int32ToAuxInt(off) 22496 v.Aux = symToAux(sym) 22497 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22498 v0.AuxInt = int32ToAuxInt(0) 22499 v.AddArg3(ptr, v0, mem) 22500 return true 22501 } 22502 // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem) 22503 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 22504 for { 22505 off := auxIntToInt32(v.AuxInt) 22506 sym := auxToSym(v.Aux) 22507 ptr := v_0 22508 if v_1.Op != OpAMD64FlagLT_ULT { 22509 break 22510 } 22511 mem := v_2 22512 v.reset(OpAMD64MOVBstore) 22513 v.AuxInt = int32ToAuxInt(off) 22514 v.Aux = symToAux(sym) 22515 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22516 v0.AuxInt = int32ToAuxInt(1) 22517 v.AddArg3(ptr, v0, mem) 22518 return true 22519 } 22520 // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem) 22521 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 22522 for { 22523 off := auxIntToInt32(v.AuxInt) 22524 sym := auxToSym(v.Aux) 22525 ptr := v_0 22526 if v_1.Op != OpAMD64FlagLT_UGT { 22527 break 22528 } 22529 mem := v_2 22530 v.reset(OpAMD64MOVBstore) 22531 v.AuxInt = int32ToAuxInt(off) 22532 v.Aux = symToAux(sym) 22533 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22534 v0.AuxInt = int32ToAuxInt(1) 22535 v.AddArg3(ptr, v0, mem) 22536 return true 22537 } 22538 // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem) 22539 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 22540 for { 22541 off := auxIntToInt32(v.AuxInt) 22542 sym := auxToSym(v.Aux) 22543 ptr := v_0 22544 if v_1.Op != OpAMD64FlagGT_ULT { 22545 break 22546 } 22547 mem := v_2 22548 v.reset(OpAMD64MOVBstore) 22549 v.AuxInt = int32ToAuxInt(off) 22550 v.Aux = symToAux(sym) 22551 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22552 v0.AuxInt = int32ToAuxInt(0) 22553 v.AddArg3(ptr, v0, mem) 22554 return true 22555 } 22556 // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem) 22557 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 22558 for { 22559 off := auxIntToInt32(v.AuxInt) 22560 sym := auxToSym(v.Aux) 22561 ptr := v_0 22562 if v_1.Op != OpAMD64FlagGT_UGT { 22563 break 22564 } 22565 mem := v_2 22566 v.reset(OpAMD64MOVBstore) 22567 v.AuxInt = int32ToAuxInt(off) 22568 v.Aux = symToAux(sym) 22569 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 22570 v0.AuxInt = int32ToAuxInt(0) 22571 v.AddArg3(ptr, v0, mem) 22572 return true 22573 
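// Editorial note (not generated): this rule and the four Flag* rules before
// it spell out signed "less than" for each statically resolved flag state:
// FlagLT_ULT and FlagLT_UGT store 1, while FlagEQ, FlagGT_ULT and
// FlagGT_UGT store 0. The unsigned half of the flag state never matters
// to a signed SETL.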
} 22574 return false 22575 } 22576 func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { 22577 v_0 := v.Args[0] 22578 b := v.Block 22579 // match: (SETNE (TESTBconst [1] x)) 22580 // result: (ANDLconst [1] x) 22581 for { 22582 if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 { 22583 break 22584 } 22585 x := v_0.Args[0] 22586 v.reset(OpAMD64ANDLconst) 22587 v.AuxInt = int32ToAuxInt(1) 22588 v.AddArg(x) 22589 return true 22590 } 22591 // match: (SETNE (TESTWconst [1] x)) 22592 // result: (ANDLconst [1] x) 22593 for { 22594 if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 { 22595 break 22596 } 22597 x := v_0.Args[0] 22598 v.reset(OpAMD64ANDLconst) 22599 v.AuxInt = int32ToAuxInt(1) 22600 v.AddArg(x) 22601 return true 22602 } 22603 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 22604 // result: (SETB (BTL x y)) 22605 for { 22606 if v_0.Op != OpAMD64TESTL { 22607 break 22608 } 22609 _ = v_0.Args[1] 22610 v_0_0 := v_0.Args[0] 22611 v_0_1 := v_0.Args[1] 22612 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22613 if v_0_0.Op != OpAMD64SHLL { 22614 continue 22615 } 22616 x := v_0_0.Args[1] 22617 v_0_0_0 := v_0_0.Args[0] 22618 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { 22619 continue 22620 } 22621 y := v_0_1 22622 v.reset(OpAMD64SETB) 22623 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 22624 v0.AddArg2(x, y) 22625 v.AddArg(v0) 22626 return true 22627 } 22628 break 22629 } 22630 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 22631 // result: (SETB (BTQ x y)) 22632 for { 22633 if v_0.Op != OpAMD64TESTQ { 22634 break 22635 } 22636 _ = v_0.Args[1] 22637 v_0_0 := v_0.Args[0] 22638 v_0_1 := v_0.Args[1] 22639 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22640 if v_0_0.Op != OpAMD64SHLQ { 22641 continue 22642 } 22643 x := v_0_0.Args[1] 22644 v_0_0_0 := v_0_0.Args[0] 22645 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { 22646 continue 22647 } 22648 y := v_0_1 22649 v.reset(OpAMD64SETB) 22650 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 22651 v0.AddArg2(x, y) 22652 v.AddArg(v0) 22653 return true 22654 } 22655 break 22656 } 22657 // match: (SETNE (TESTLconst [c] x)) 22658 // cond: isUint32PowerOfTwo(int64(c)) 22659 // result: (SETB (BTLconst [int8(log32(c))] x)) 22660 for { 22661 if v_0.Op != OpAMD64TESTLconst { 22662 break 22663 } 22664 c := auxIntToInt32(v_0.AuxInt) 22665 x := v_0.Args[0] 22666 if !(isUint32PowerOfTwo(int64(c))) { 22667 break 22668 } 22669 v.reset(OpAMD64SETB) 22670 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 22671 v0.AuxInt = int8ToAuxInt(int8(log32(c))) 22672 v0.AddArg(x) 22673 v.AddArg(v0) 22674 return true 22675 } 22676 // match: (SETNE (TESTQconst [c] x)) 22677 // cond: isUint64PowerOfTwo(int64(c)) 22678 // result: (SETB (BTQconst [int8(log32(c))] x)) 22679 for { 22680 if v_0.Op != OpAMD64TESTQconst { 22681 break 22682 } 22683 c := auxIntToInt32(v_0.AuxInt) 22684 x := v_0.Args[0] 22685 if !(isUint64PowerOfTwo(int64(c))) { 22686 break 22687 } 22688 v.reset(OpAMD64SETB) 22689 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 22690 v0.AuxInt = int8ToAuxInt(int8(log32(c))) 22691 v0.AddArg(x) 22692 v.AddArg(v0) 22693 return true 22694 } 22695 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 22696 // cond: isUint64PowerOfTwo(c) 22697 // result: (SETB (BTQconst [int8(log64(c))] x)) 22698 for { 22699 if v_0.Op != OpAMD64TESTQ { 22700 break 22701 } 22702 _ = v_0.Args[1] 22703 v_0_0 := v_0.Args[0] 22704 v_0_1 := v_0.Args[1] 
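// Editorial note (not generated): the loop below is the generated idiom for
// commutative operands; it runs the match twice, swapping v_0_0 and v_0_1 on
// the second pass, so the MOVQconst may sit on either side of the TESTQ.
// This variant also complements the TESTQconst rule above: TESTQconst only
// carries a 32-bit immediate, so a wider power-of-two mask (say a
// hypothetical `x&(1<<40) != 0` in Go source) arrives materialized through
// MOVQconst and is caught here instead.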
22705 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22706 if v_0_0.Op != OpAMD64MOVQconst { 22707 continue 22708 } 22709 c := auxIntToInt64(v_0_0.AuxInt) 22710 x := v_0_1 22711 if !(isUint64PowerOfTwo(c)) { 22712 continue 22713 } 22714 v.reset(OpAMD64SETB) 22715 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 22716 v0.AuxInt = int8ToAuxInt(int8(log64(c))) 22717 v0.AddArg(x) 22718 v.AddArg(v0) 22719 return true 22720 } 22721 break 22722 } 22723 // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _))) 22724 // result: (SETEQ (CMPLconst [0] s)) 22725 for { 22726 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 { 22727 break 22728 } 22729 s := v_0.Args[0] 22730 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { 22731 break 22732 } 22733 v.reset(OpAMD64SETEQ) 22734 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 22735 v0.AuxInt = int32ToAuxInt(0) 22736 v0.AddArg(s) 22737 v.AddArg(v0) 22738 return true 22739 } 22740 // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _))) 22741 // result: (SETEQ (CMPQconst [0] s)) 22742 for { 22743 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 { 22744 break 22745 } 22746 s := v_0.Args[0] 22747 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { 22748 break 22749 } 22750 v.reset(OpAMD64SETEQ) 22751 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 22752 v0.AuxInt = int32ToAuxInt(0) 22753 v0.AddArg(s) 22754 v.AddArg(v0) 22755 return true 22756 } 22757 // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) 22758 // cond: z1==z2 22759 // result: (SETB (BTQconst [63] x)) 22760 for { 22761 if v_0.Op != OpAMD64TESTQ { 22762 break 22763 } 22764 _ = v_0.Args[1] 22765 v_0_0 := v_0.Args[0] 22766 v_0_1 := v_0.Args[1] 22767 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22768 z1 := v_0_0 22769 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { 22770 continue 22771 } 22772 z1_0 := z1.Args[0] 22773 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { 22774 continue 22775 } 22776 x := z1_0.Args[0] 22777 z2 := v_0_1 22778 if !(z1 == z2) { 22779 continue 22780 } 22781 v.reset(OpAMD64SETB) 22782 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 22783 v0.AuxInt = int8ToAuxInt(63) 22784 v0.AddArg(x) 22785 v.AddArg(v0) 22786 return true 22787 } 22788 break 22789 } 22790 // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) 22791 // cond: z1==z2 22792 // result: (SETB (BTQconst [31] x)) 22793 for { 22794 if v_0.Op != OpAMD64TESTL { 22795 break 22796 } 22797 _ = v_0.Args[1] 22798 v_0_0 := v_0.Args[0] 22799 v_0_1 := v_0.Args[1] 22800 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22801 z1 := v_0_0 22802 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { 22803 continue 22804 } 22805 z1_0 := z1.Args[0] 22806 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { 22807 continue 22808 } 22809 x := z1_0.Args[0] 22810 z2 := v_0_1 22811 if !(z1 == z2) { 22812 continue 22813 } 22814 v.reset(OpAMD64SETB) 22815 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 22816 v0.AuxInt = int8ToAuxInt(31) 22817 v0.AddArg(x) 22818 v.AddArg(v0) 22819 return true 22820 } 22821 break 22822 } 22823 // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) 22824 // cond: z1==z2 22825 // result: (SETB (BTQconst [0] x)) 22826 for { 22827 if v_0.Op != OpAMD64TESTQ { 22828 break 22829 } 22830 _ = v_0.Args[1] 22831 v_0_0 := v_0.Args[0] 22832 v_0_1 := v_0.Args[1] 22833 for 
_i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22834 z1 := v_0_0 22835 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 22836 continue 22837 } 22838 z1_0 := z1.Args[0] 22839 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { 22840 continue 22841 } 22842 x := z1_0.Args[0] 22843 z2 := v_0_1 22844 if !(z1 == z2) { 22845 continue 22846 } 22847 v.reset(OpAMD64SETB) 22848 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 22849 v0.AuxInt = int8ToAuxInt(0) 22850 v0.AddArg(x) 22851 v.AddArg(v0) 22852 return true 22853 } 22854 break 22855 } 22856 // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) 22857 // cond: z1==z2 22858 // result: (SETB (BTLconst [0] x)) 22859 for { 22860 if v_0.Op != OpAMD64TESTL { 22861 break 22862 } 22863 _ = v_0.Args[1] 22864 v_0_0 := v_0.Args[0] 22865 v_0_1 := v_0.Args[1] 22866 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22867 z1 := v_0_0 22868 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 22869 continue 22870 } 22871 z1_0 := z1.Args[0] 22872 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { 22873 continue 22874 } 22875 x := z1_0.Args[0] 22876 z2 := v_0_1 22877 if !(z1 == z2) { 22878 continue 22879 } 22880 v.reset(OpAMD64SETB) 22881 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 22882 v0.AuxInt = int8ToAuxInt(0) 22883 v0.AddArg(x) 22884 v.AddArg(v0) 22885 return true 22886 } 22887 break 22888 } 22889 // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2)) 22890 // cond: z1==z2 22891 // result: (SETB (BTQconst [63] x)) 22892 for { 22893 if v_0.Op != OpAMD64TESTQ { 22894 break 22895 } 22896 _ = v_0.Args[1] 22897 v_0_0 := v_0.Args[0] 22898 v_0_1 := v_0.Args[1] 22899 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22900 z1 := v_0_0 22901 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 22902 continue 22903 } 22904 x := z1.Args[0] 22905 z2 := v_0_1 22906 if !(z1 == z2) { 22907 continue 22908 } 22909 v.reset(OpAMD64SETB) 22910 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 22911 v0.AuxInt = int8ToAuxInt(63) 22912 v0.AddArg(x) 22913 v.AddArg(v0) 22914 return true 22915 } 22916 break 22917 } 22918 // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2)) 22919 // cond: z1==z2 22920 // result: (SETB (BTLconst [31] x)) 22921 for { 22922 if v_0.Op != OpAMD64TESTL { 22923 break 22924 } 22925 _ = v_0.Args[1] 22926 v_0_0 := v_0.Args[0] 22927 v_0_1 := v_0.Args[1] 22928 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 22929 z1 := v_0_0 22930 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 22931 continue 22932 } 22933 x := z1.Args[0] 22934 z2 := v_0_1 22935 if !(z1 == z2) { 22936 continue 22937 } 22938 v.reset(OpAMD64SETB) 22939 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 22940 v0.AuxInt = int8ToAuxInt(31) 22941 v0.AddArg(x) 22942 v.AddArg(v0) 22943 return true 22944 } 22945 break 22946 } 22947 // match: (SETNE (InvertFlags x)) 22948 // result: (SETNE x) 22949 for { 22950 if v_0.Op != OpAMD64InvertFlags { 22951 break 22952 } 22953 x := v_0.Args[0] 22954 v.reset(OpAMD64SETNE) 22955 v.AddArg(x) 22956 return true 22957 } 22958 // match: (SETNE (FlagEQ)) 22959 // result: (MOVLconst [0]) 22960 for { 22961 if v_0.Op != OpAMD64FlagEQ { 22962 break 22963 } 22964 v.reset(OpAMD64MOVLconst) 22965 v.AuxInt = int32ToAuxInt(0) 22966 return true 22967 } 22968 // match: (SETNE (FlagLT_ULT)) 22969 // result: (MOVLconst [1]) 22970 for { 22971 if v_0.Op != OpAMD64FlagLT_ULT { 22972 break 
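// Editorial note (not generated): SETNE is false only for FlagEQ, rewritten
// to MOVLconst [0] immediately above; this rule and the remaining three
// Flag* rules all resolve to MOVLconst [1].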
22973 } 22974 v.reset(OpAMD64MOVLconst) 22975 v.AuxInt = int32ToAuxInt(1) 22976 return true 22977 } 22978 // match: (SETNE (FlagLT_UGT)) 22979 // result: (MOVLconst [1]) 22980 for { 22981 if v_0.Op != OpAMD64FlagLT_UGT { 22982 break 22983 } 22984 v.reset(OpAMD64MOVLconst) 22985 v.AuxInt = int32ToAuxInt(1) 22986 return true 22987 } 22988 // match: (SETNE (FlagGT_ULT)) 22989 // result: (MOVLconst [1]) 22990 for { 22991 if v_0.Op != OpAMD64FlagGT_ULT { 22992 break 22993 } 22994 v.reset(OpAMD64MOVLconst) 22995 v.AuxInt = int32ToAuxInt(1) 22996 return true 22997 } 22998 // match: (SETNE (FlagGT_UGT)) 22999 // result: (MOVLconst [1]) 23000 for { 23001 if v_0.Op != OpAMD64FlagGT_UGT { 23002 break 23003 } 23004 v.reset(OpAMD64MOVLconst) 23005 v.AuxInt = int32ToAuxInt(1) 23006 return true 23007 } 23008 return false 23009 } 23010 func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { 23011 v_2 := v.Args[2] 23012 v_1 := v.Args[1] 23013 v_0 := v.Args[0] 23014 b := v.Block 23015 typ := &b.Func.Config.Types 23016 // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) 23017 // result: (SETBstore [off] {sym} ptr (BTL x y) mem) 23018 for { 23019 off := auxIntToInt32(v.AuxInt) 23020 sym := auxToSym(v.Aux) 23021 ptr := v_0 23022 if v_1.Op != OpAMD64TESTL { 23023 break 23024 } 23025 _ = v_1.Args[1] 23026 v_1_0 := v_1.Args[0] 23027 v_1_1 := v_1.Args[1] 23028 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23029 if v_1_0.Op != OpAMD64SHLL { 23030 continue 23031 } 23032 x := v_1_0.Args[1] 23033 v_1_0_0 := v_1_0.Args[0] 23034 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 { 23035 continue 23036 } 23037 y := v_1_1 23038 mem := v_2 23039 v.reset(OpAMD64SETBstore) 23040 v.AuxInt = int32ToAuxInt(off) 23041 v.Aux = symToAux(sym) 23042 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 23043 v0.AddArg2(x, y) 23044 v.AddArg3(ptr, v0, mem) 23045 return true 23046 } 23047 break 23048 } 23049 // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) 23050 // result: (SETBstore [off] {sym} ptr (BTQ x y) mem) 23051 for { 23052 off := auxIntToInt32(v.AuxInt) 23053 sym := auxToSym(v.Aux) 23054 ptr := v_0 23055 if v_1.Op != OpAMD64TESTQ { 23056 break 23057 } 23058 _ = v_1.Args[1] 23059 v_1_0 := v_1.Args[0] 23060 v_1_1 := v_1.Args[1] 23061 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23062 if v_1_0.Op != OpAMD64SHLQ { 23063 continue 23064 } 23065 x := v_1_0.Args[1] 23066 v_1_0_0 := v_1_0.Args[0] 23067 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { 23068 continue 23069 } 23070 y := v_1_1 23071 mem := v_2 23072 v.reset(OpAMD64SETBstore) 23073 v.AuxInt = int32ToAuxInt(off) 23074 v.Aux = symToAux(sym) 23075 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 23076 v0.AddArg2(x, y) 23077 v.AddArg3(ptr, v0, mem) 23078 return true 23079 } 23080 break 23081 } 23082 // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem) 23083 // cond: isUint32PowerOfTwo(int64(c)) 23084 // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) 23085 for { 23086 off := auxIntToInt32(v.AuxInt) 23087 sym := auxToSym(v.Aux) 23088 ptr := v_0 23089 if v_1.Op != OpAMD64TESTLconst { 23090 break 23091 } 23092 c := auxIntToInt32(v_1.AuxInt) 23093 x := v_1.Args[0] 23094 mem := v_2 23095 if !(isUint32PowerOfTwo(int64(c))) { 23096 break 23097 } 23098 v.reset(OpAMD64SETBstore) 23099 v.AuxInt = int32ToAuxInt(off) 23100 v.Aux = symToAux(sym) 23101 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 
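// Editorial note (not generated): isUint32PowerOfTwo established c == 1<<k,
// so log32(c) recovers the bit index k that BTLconst tests below.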
23102 v0.AuxInt = int8ToAuxInt(int8(log32(c))) 23103 v0.AddArg(x) 23104 v.AddArg3(ptr, v0, mem) 23105 return true 23106 } 23107 // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem) 23108 // cond: isUint64PowerOfTwo(int64(c)) 23109 // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) 23110 for { 23111 off := auxIntToInt32(v.AuxInt) 23112 sym := auxToSym(v.Aux) 23113 ptr := v_0 23114 if v_1.Op != OpAMD64TESTQconst { 23115 break 23116 } 23117 c := auxIntToInt32(v_1.AuxInt) 23118 x := v_1.Args[0] 23119 mem := v_2 23120 if !(isUint64PowerOfTwo(int64(c))) { 23121 break 23122 } 23123 v.reset(OpAMD64SETBstore) 23124 v.AuxInt = int32ToAuxInt(off) 23125 v.Aux = symToAux(sym) 23126 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 23127 v0.AuxInt = int8ToAuxInt(int8(log32(c))) 23128 v0.AddArg(x) 23129 v.AddArg3(ptr, v0, mem) 23130 return true 23131 } 23132 // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) 23133 // cond: isUint64PowerOfTwo(c) 23134 // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem) 23135 for { 23136 off := auxIntToInt32(v.AuxInt) 23137 sym := auxToSym(v.Aux) 23138 ptr := v_0 23139 if v_1.Op != OpAMD64TESTQ { 23140 break 23141 } 23142 _ = v_1.Args[1] 23143 v_1_0 := v_1.Args[0] 23144 v_1_1 := v_1.Args[1] 23145 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23146 if v_1_0.Op != OpAMD64MOVQconst { 23147 continue 23148 } 23149 c := auxIntToInt64(v_1_0.AuxInt) 23150 x := v_1_1 23151 mem := v_2 23152 if !(isUint64PowerOfTwo(c)) { 23153 continue 23154 } 23155 v.reset(OpAMD64SETBstore) 23156 v.AuxInt = int32ToAuxInt(off) 23157 v.Aux = symToAux(sym) 23158 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 23159 v0.AuxInt = int8ToAuxInt(int8(log64(c))) 23160 v0.AddArg(x) 23161 v.AddArg3(ptr, v0, mem) 23162 return true 23163 } 23164 break 23165 } 23166 // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) 23167 // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem) 23168 for { 23169 off := auxIntToInt32(v.AuxInt) 23170 sym := auxToSym(v.Aux) 23171 ptr := v_0 23172 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 { 23173 break 23174 } 23175 s := v_1.Args[0] 23176 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { 23177 break 23178 } 23179 mem := v_2 23180 v.reset(OpAMD64SETEQstore) 23181 v.AuxInt = int32ToAuxInt(off) 23182 v.Aux = symToAux(sym) 23183 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 23184 v0.AuxInt = int32ToAuxInt(0) 23185 v0.AddArg(s) 23186 v.AddArg3(ptr, v0, mem) 23187 return true 23188 } 23189 // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) 23190 // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem) 23191 for { 23192 off := auxIntToInt32(v.AuxInt) 23193 sym := auxToSym(v.Aux) 23194 ptr := v_0 23195 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 { 23196 break 23197 } 23198 s := v_1.Args[0] 23199 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { 23200 break 23201 } 23202 mem := v_2 23203 v.reset(OpAMD64SETEQstore) 23204 v.AuxInt = int32ToAuxInt(off) 23205 v.Aux = symToAux(sym) 23206 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 23207 v0.AuxInt = int32ToAuxInt(0) 23208 v0.AddArg(s) 23209 v.AddArg3(ptr, v0, mem) 23210 return true 23211 } 23212 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) 23213 // cond: z1==z2 23214 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) 23215 
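// Editorial note (not generated): (SHRQconst [63] x) leaves only 0 or 1,
// and the outer (SHLQconst [63] ...) moves that bit back up, so z1 is x
// with every bit but bit 63 cleared. Testing z1 against itself therefore
// asks exactly whether the sign bit of x is set, which BTQconst [63]
// answers directly, SETBstore then capturing the bit from the carry flag.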
for { 23216 off := auxIntToInt32(v.AuxInt) 23217 sym := auxToSym(v.Aux) 23218 ptr := v_0 23219 if v_1.Op != OpAMD64TESTQ { 23220 break 23221 } 23222 _ = v_1.Args[1] 23223 v_1_0 := v_1.Args[0] 23224 v_1_1 := v_1.Args[1] 23225 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23226 z1 := v_1_0 23227 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { 23228 continue 23229 } 23230 z1_0 := z1.Args[0] 23231 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { 23232 continue 23233 } 23234 x := z1_0.Args[0] 23235 z2 := v_1_1 23236 mem := v_2 23237 if !(z1 == z2) { 23238 continue 23239 } 23240 v.reset(OpAMD64SETBstore) 23241 v.AuxInt = int32ToAuxInt(off) 23242 v.Aux = symToAux(sym) 23243 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 23244 v0.AuxInt = int8ToAuxInt(63) 23245 v0.AddArg(x) 23246 v.AddArg3(ptr, v0, mem) 23247 return true 23248 } 23249 break 23250 } 23251 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) 23252 // cond: z1==z2 23253 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) 23254 for { 23255 off := auxIntToInt32(v.AuxInt) 23256 sym := auxToSym(v.Aux) 23257 ptr := v_0 23258 if v_1.Op != OpAMD64TESTL { 23259 break 23260 } 23261 _ = v_1.Args[1] 23262 v_1_0 := v_1.Args[0] 23263 v_1_1 := v_1.Args[1] 23264 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23265 z1 := v_1_0 23266 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { 23267 continue 23268 } 23269 z1_0 := z1.Args[0] 23270 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 { 23271 continue 23272 } 23273 x := z1_0.Args[0] 23274 z2 := v_1_1 23275 mem := v_2 23276 if !(z1 == z2) { 23277 continue 23278 } 23279 v.reset(OpAMD64SETBstore) 23280 v.AuxInt = int32ToAuxInt(off) 23281 v.Aux = symToAux(sym) 23282 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 23283 v0.AuxInt = int8ToAuxInt(31) 23284 v0.AddArg(x) 23285 v.AddArg3(ptr, v0, mem) 23286 return true 23287 } 23288 break 23289 } 23290 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) 23291 // cond: z1==z2 23292 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem) 23293 for { 23294 off := auxIntToInt32(v.AuxInt) 23295 sym := auxToSym(v.Aux) 23296 ptr := v_0 23297 if v_1.Op != OpAMD64TESTQ { 23298 break 23299 } 23300 _ = v_1.Args[1] 23301 v_1_0 := v_1.Args[0] 23302 v_1_1 := v_1.Args[1] 23303 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23304 z1 := v_1_0 23305 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 23306 continue 23307 } 23308 z1_0 := z1.Args[0] 23309 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { 23310 continue 23311 } 23312 x := z1_0.Args[0] 23313 z2 := v_1_1 23314 mem := v_2 23315 if !(z1 == z2) { 23316 continue 23317 } 23318 v.reset(OpAMD64SETBstore) 23319 v.AuxInt = int32ToAuxInt(off) 23320 v.Aux = symToAux(sym) 23321 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 23322 v0.AuxInt = int8ToAuxInt(0) 23323 v0.AddArg(x) 23324 v.AddArg3(ptr, v0, mem) 23325 return true 23326 } 23327 break 23328 } 23329 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) 23330 // cond: z1==z2 23331 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem) 23332 for { 23333 off := auxIntToInt32(v.AuxInt) 23334 sym := auxToSym(v.Aux) 23335 ptr := v_0 23336 if v_1.Op != OpAMD64TESTL { 23337 break 23338 } 23339 _ = v_1.Args[1] 23340 v_1_0 := v_1.Args[0] 23341 v_1_1 := 
v_1.Args[1] 23342 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23343 z1 := v_1_0 23344 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 23345 continue 23346 } 23347 z1_0 := z1.Args[0] 23348 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { 23349 continue 23350 } 23351 x := z1_0.Args[0] 23352 z2 := v_1_1 23353 mem := v_2 23354 if !(z1 == z2) { 23355 continue 23356 } 23357 v.reset(OpAMD64SETBstore) 23358 v.AuxInt = int32ToAuxInt(off) 23359 v.Aux = symToAux(sym) 23360 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 23361 v0.AuxInt = int8ToAuxInt(0) 23362 v0.AddArg(x) 23363 v.AddArg3(ptr, v0, mem) 23364 return true 23365 } 23366 break 23367 } 23368 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) 23369 // cond: z1==z2 23370 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) 23371 for { 23372 off := auxIntToInt32(v.AuxInt) 23373 sym := auxToSym(v.Aux) 23374 ptr := v_0 23375 if v_1.Op != OpAMD64TESTQ { 23376 break 23377 } 23378 _ = v_1.Args[1] 23379 v_1_0 := v_1.Args[0] 23380 v_1_1 := v_1.Args[1] 23381 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23382 z1 := v_1_0 23383 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 23384 continue 23385 } 23386 x := z1.Args[0] 23387 z2 := v_1_1 23388 mem := v_2 23389 if !(z1 == z2) { 23390 continue 23391 } 23392 v.reset(OpAMD64SETBstore) 23393 v.AuxInt = int32ToAuxInt(off) 23394 v.Aux = symToAux(sym) 23395 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 23396 v0.AuxInt = int8ToAuxInt(63) 23397 v0.AddArg(x) 23398 v.AddArg3(ptr, v0, mem) 23399 return true 23400 } 23401 break 23402 } 23403 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) 23404 // cond: z1==z2 23405 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) 23406 for { 23407 off := auxIntToInt32(v.AuxInt) 23408 sym := auxToSym(v.Aux) 23409 ptr := v_0 23410 if v_1.Op != OpAMD64TESTL { 23411 break 23412 } 23413 _ = v_1.Args[1] 23414 v_1_0 := v_1.Args[0] 23415 v_1_1 := v_1.Args[1] 23416 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 23417 z1 := v_1_0 23418 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 23419 continue 23420 } 23421 x := z1.Args[0] 23422 z2 := v_1_1 23423 mem := v_2 23424 if !(z1 == z2) { 23425 continue 23426 } 23427 v.reset(OpAMD64SETBstore) 23428 v.AuxInt = int32ToAuxInt(off) 23429 v.Aux = symToAux(sym) 23430 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 23431 v0.AuxInt = int8ToAuxInt(31) 23432 v0.AddArg(x) 23433 v.AddArg3(ptr, v0, mem) 23434 return true 23435 } 23436 break 23437 } 23438 // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem) 23439 // result: (SETNEstore [off] {sym} ptr x mem) 23440 for { 23441 off := auxIntToInt32(v.AuxInt) 23442 sym := auxToSym(v.Aux) 23443 ptr := v_0 23444 if v_1.Op != OpAMD64InvertFlags { 23445 break 23446 } 23447 x := v_1.Args[0] 23448 mem := v_2 23449 v.reset(OpAMD64SETNEstore) 23450 v.AuxInt = int32ToAuxInt(off) 23451 v.Aux = symToAux(sym) 23452 v.AddArg3(ptr, x, mem) 23453 return true 23454 } 23455 // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem) 23456 // cond: is32Bit(int64(off1)+int64(off2)) 23457 // result: (SETNEstore [off1+off2] {sym} base val mem) 23458 for { 23459 off1 := auxIntToInt32(v.AuxInt) 23460 sym := auxToSym(v.Aux) 23461 if v_0.Op != OpAMD64ADDQconst { 23462 break 23463 } 23464 off2 := auxIntToInt32(v_0.AuxInt) 23465 base := v_0.Args[0] 23466 val := v_1 23467 mem := v_2 23468 if 
!(is32Bit(int64(off1) + int64(off2))) { 23469 break 23470 } 23471 v.reset(OpAMD64SETNEstore) 23472 v.AuxInt = int32ToAuxInt(off1 + off2) 23473 v.Aux = symToAux(sym) 23474 v.AddArg3(base, val, mem) 23475 return true 23476 } 23477 // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 23478 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 23479 // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 23480 for { 23481 off1 := auxIntToInt32(v.AuxInt) 23482 sym1 := auxToSym(v.Aux) 23483 if v_0.Op != OpAMD64LEAQ { 23484 break 23485 } 23486 off2 := auxIntToInt32(v_0.AuxInt) 23487 sym2 := auxToSym(v_0.Aux) 23488 base := v_0.Args[0] 23489 val := v_1 23490 mem := v_2 23491 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 23492 break 23493 } 23494 v.reset(OpAMD64SETNEstore) 23495 v.AuxInt = int32ToAuxInt(off1 + off2) 23496 v.Aux = symToAux(mergeSym(sym1, sym2)) 23497 v.AddArg3(base, val, mem) 23498 return true 23499 } 23500 // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem) 23501 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 23502 for { 23503 off := auxIntToInt32(v.AuxInt) 23504 sym := auxToSym(v.Aux) 23505 ptr := v_0 23506 if v_1.Op != OpAMD64FlagEQ { 23507 break 23508 } 23509 mem := v_2 23510 v.reset(OpAMD64MOVBstore) 23511 v.AuxInt = int32ToAuxInt(off) 23512 v.Aux = symToAux(sym) 23513 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 23514 v0.AuxInt = int32ToAuxInt(0) 23515 v.AddArg3(ptr, v0, mem) 23516 return true 23517 } 23518 // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) 23519 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 23520 for { 23521 off := auxIntToInt32(v.AuxInt) 23522 sym := auxToSym(v.Aux) 23523 ptr := v_0 23524 if v_1.Op != OpAMD64FlagLT_ULT { 23525 break 23526 } 23527 mem := v_2 23528 v.reset(OpAMD64MOVBstore) 23529 v.AuxInt = int32ToAuxInt(off) 23530 v.Aux = symToAux(sym) 23531 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 23532 v0.AuxInt = int32ToAuxInt(1) 23533 v.AddArg3(ptr, v0, mem) 23534 return true 23535 } 23536 // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) 23537 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 23538 for { 23539 off := auxIntToInt32(v.AuxInt) 23540 sym := auxToSym(v.Aux) 23541 ptr := v_0 23542 if v_1.Op != OpAMD64FlagLT_UGT { 23543 break 23544 } 23545 mem := v_2 23546 v.reset(OpAMD64MOVBstore) 23547 v.AuxInt = int32ToAuxInt(off) 23548 v.Aux = symToAux(sym) 23549 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 23550 v0.AuxInt = int32ToAuxInt(1) 23551 v.AddArg3(ptr, v0, mem) 23552 return true 23553 } 23554 // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) 23555 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 23556 for { 23557 off := auxIntToInt32(v.AuxInt) 23558 sym := auxToSym(v.Aux) 23559 ptr := v_0 23560 if v_1.Op != OpAMD64FlagGT_ULT { 23561 break 23562 } 23563 mem := v_2 23564 v.reset(OpAMD64MOVBstore) 23565 v.AuxInt = int32ToAuxInt(off) 23566 v.Aux = symToAux(sym) 23567 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 23568 v0.AuxInt = int32ToAuxInt(1) 23569 v.AddArg3(ptr, v0, mem) 23570 return true 23571 } 23572 // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) 23573 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 23574 for { 23575 off := auxIntToInt32(v.AuxInt) 23576 sym := auxToSym(v.Aux) 23577 ptr := v_0 23578 if v_1.Op != OpAMD64FlagGT_UGT { 23579 break 23580 } 23581 mem := v_2 23582 v.reset(OpAMD64MOVBstore) 
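// Editorial note (not generated): mirroring the scalar SETNE rules, only
// FlagEQ stores 0; every statically known inequality, including this
// FlagGT_UGT case, stores the byte constant 1.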
23583 v.AuxInt = int32ToAuxInt(off) 23584 v.Aux = symToAux(sym) 23585 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 23586 v0.AuxInt = int32ToAuxInt(1) 23587 v.AddArg3(ptr, v0, mem) 23588 return true 23589 } 23590 return false 23591 } 23592 func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { 23593 v_1 := v.Args[1] 23594 v_0 := v.Args[0] 23595 b := v.Block 23596 // match: (SHLL x (MOVQconst [c])) 23597 // result: (SHLLconst [int8(c&31)] x) 23598 for { 23599 x := v_0 23600 if v_1.Op != OpAMD64MOVQconst { 23601 break 23602 } 23603 c := auxIntToInt64(v_1.AuxInt) 23604 v.reset(OpAMD64SHLLconst) 23605 v.AuxInt = int8ToAuxInt(int8(c & 31)) 23606 v.AddArg(x) 23607 return true 23608 } 23609 // match: (SHLL x (MOVLconst [c])) 23610 // result: (SHLLconst [int8(c&31)] x) 23611 for { 23612 x := v_0 23613 if v_1.Op != OpAMD64MOVLconst { 23614 break 23615 } 23616 c := auxIntToInt32(v_1.AuxInt) 23617 v.reset(OpAMD64SHLLconst) 23618 v.AuxInt = int8ToAuxInt(int8(c & 31)) 23619 v.AddArg(x) 23620 return true 23621 } 23622 // match: (SHLL x (ADDQconst [c] y)) 23623 // cond: c & 31 == 0 23624 // result: (SHLL x y) 23625 for { 23626 x := v_0 23627 if v_1.Op != OpAMD64ADDQconst { 23628 break 23629 } 23630 c := auxIntToInt32(v_1.AuxInt) 23631 y := v_1.Args[0] 23632 if !(c&31 == 0) { 23633 break 23634 } 23635 v.reset(OpAMD64SHLL) 23636 v.AddArg2(x, y) 23637 return true 23638 } 23639 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y))) 23640 // cond: c & 31 == 0 23641 // result: (SHLL x (NEGQ <t> y)) 23642 for { 23643 x := v_0 23644 if v_1.Op != OpAMD64NEGQ { 23645 break 23646 } 23647 t := v_1.Type 23648 v_1_0 := v_1.Args[0] 23649 if v_1_0.Op != OpAMD64ADDQconst { 23650 break 23651 } 23652 c := auxIntToInt32(v_1_0.AuxInt) 23653 y := v_1_0.Args[0] 23654 if !(c&31 == 0) { 23655 break 23656 } 23657 v.reset(OpAMD64SHLL) 23658 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 23659 v0.AddArg(y) 23660 v.AddArg2(x, v0) 23661 return true 23662 } 23663 // match: (SHLL x (ANDQconst [c] y)) 23664 // cond: c & 31 == 31 23665 // result: (SHLL x y) 23666 for { 23667 x := v_0 23668 if v_1.Op != OpAMD64ANDQconst { 23669 break 23670 } 23671 c := auxIntToInt32(v_1.AuxInt) 23672 y := v_1.Args[0] 23673 if !(c&31 == 31) { 23674 break 23675 } 23676 v.reset(OpAMD64SHLL) 23677 v.AddArg2(x, y) 23678 return true 23679 } 23680 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y))) 23681 // cond: c & 31 == 31 23682 // result: (SHLL x (NEGQ <t> y)) 23683 for { 23684 x := v_0 23685 if v_1.Op != OpAMD64NEGQ { 23686 break 23687 } 23688 t := v_1.Type 23689 v_1_0 := v_1.Args[0] 23690 if v_1_0.Op != OpAMD64ANDQconst { 23691 break 23692 } 23693 c := auxIntToInt32(v_1_0.AuxInt) 23694 y := v_1_0.Args[0] 23695 if !(c&31 == 31) { 23696 break 23697 } 23698 v.reset(OpAMD64SHLL) 23699 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 23700 v0.AddArg(y) 23701 v.AddArg2(x, v0) 23702 return true 23703 } 23704 // match: (SHLL x (ADDLconst [c] y)) 23705 // cond: c & 31 == 0 23706 // result: (SHLL x y) 23707 for { 23708 x := v_0 23709 if v_1.Op != OpAMD64ADDLconst { 23710 break 23711 } 23712 c := auxIntToInt32(v_1.AuxInt) 23713 y := v_1.Args[0] 23714 if !(c&31 == 0) { 23715 break 23716 } 23717 v.reset(OpAMD64SHLL) 23718 v.AddArg2(x, y) 23719 return true 23720 } 23721 // match: (SHLL x (NEGL <t> (ADDLconst [c] y))) 23722 // cond: c & 31 == 0 23723 // result: (SHLL x (NEGL <t> y)) 23724 for { 23725 x := v_0 23726 if v_1.Op != OpAMD64NEGL { 23727 break 23728 } 23729 t := v_1.Type 23730 v_1_0 := v_1.Args[0] 23731 if v_1_0.Op != OpAMD64ADDLconst { 23732 break 23733 } 23734 c := 
auxIntToInt32(v_1_0.AuxInt) 23735 y := v_1_0.Args[0] 23736 if !(c&31 == 0) { 23737 break 23738 } 23739 v.reset(OpAMD64SHLL) 23740 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 23741 v0.AddArg(y) 23742 v.AddArg2(x, v0) 23743 return true 23744 } 23745 // match: (SHLL x (ANDLconst [c] y)) 23746 // cond: c & 31 == 31 23747 // result: (SHLL x y) 23748 for { 23749 x := v_0 23750 if v_1.Op != OpAMD64ANDLconst { 23751 break 23752 } 23753 c := auxIntToInt32(v_1.AuxInt) 23754 y := v_1.Args[0] 23755 if !(c&31 == 31) { 23756 break 23757 } 23758 v.reset(OpAMD64SHLL) 23759 v.AddArg2(x, y) 23760 return true 23761 } 23762 // match: (SHLL x (NEGL <t> (ANDLconst [c] y))) 23763 // cond: c & 31 == 31 23764 // result: (SHLL x (NEGL <t> y)) 23765 for { 23766 x := v_0 23767 if v_1.Op != OpAMD64NEGL { 23768 break 23769 } 23770 t := v_1.Type 23771 v_1_0 := v_1.Args[0] 23772 if v_1_0.Op != OpAMD64ANDLconst { 23773 break 23774 } 23775 c := auxIntToInt32(v_1_0.AuxInt) 23776 y := v_1_0.Args[0] 23777 if !(c&31 == 31) { 23778 break 23779 } 23780 v.reset(OpAMD64SHLL) 23781 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 23782 v0.AddArg(y) 23783 v.AddArg2(x, v0) 23784 return true 23785 } 23786 // match: (SHLL l:(MOVLload [off] {sym} ptr mem) x) 23787 // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) 23788 // result: (SHLXLload [off] {sym} ptr x mem) 23789 for { 23790 l := v_0 23791 if l.Op != OpAMD64MOVLload { 23792 break 23793 } 23794 off := auxIntToInt32(l.AuxInt) 23795 sym := auxToSym(l.Aux) 23796 mem := l.Args[1] 23797 ptr := l.Args[0] 23798 x := v_1 23799 if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { 23800 break 23801 } 23802 v.reset(OpAMD64SHLXLload) 23803 v.AuxInt = int32ToAuxInt(off) 23804 v.Aux = symToAux(sym) 23805 v.AddArg3(ptr, x, mem) 23806 return true 23807 } 23808 return false 23809 } 23810 func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool { 23811 v_0 := v.Args[0] 23812 // match: (SHLLconst [1] (SHRLconst [1] x)) 23813 // result: (BTRLconst [0] x) 23814 for { 23815 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 { 23816 break 23817 } 23818 x := v_0.Args[0] 23819 v.reset(OpAMD64BTRLconst) 23820 v.AuxInt = int8ToAuxInt(0) 23821 v.AddArg(x) 23822 return true 23823 } 23824 // match: (SHLLconst x [0]) 23825 // result: x 23826 for { 23827 if auxIntToInt8(v.AuxInt) != 0 { 23828 break 23829 } 23830 x := v_0 23831 v.copyOf(x) 23832 return true 23833 } 23834 // match: (SHLLconst [d] (MOVLconst [c])) 23835 // result: (MOVLconst [c << uint64(d)]) 23836 for { 23837 d := auxIntToInt8(v.AuxInt) 23838 if v_0.Op != OpAMD64MOVLconst { 23839 break 23840 } 23841 c := auxIntToInt32(v_0.AuxInt) 23842 v.reset(OpAMD64MOVLconst) 23843 v.AuxInt = int32ToAuxInt(c << uint64(d)) 23844 return true 23845 } 23846 return false 23847 } 23848 func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { 23849 v_1 := v.Args[1] 23850 v_0 := v.Args[0] 23851 b := v.Block 23852 // match: (SHLQ x (MOVQconst [c])) 23853 // result: (SHLQconst [int8(c&63)] x) 23854 for { 23855 x := v_0 23856 if v_1.Op != OpAMD64MOVQconst { 23857 break 23858 } 23859 c := auxIntToInt64(v_1.AuxInt) 23860 v.reset(OpAMD64SHLQconst) 23861 v.AuxInt = int8ToAuxInt(int8(c & 63)) 23862 v.AddArg(x) 23863 return true 23864 } 23865 // match: (SHLQ x (MOVLconst [c])) 23866 // result: (SHLQconst [int8(c&63)] x) 23867 for { 23868 x := v_0 23869 if v_1.Op != OpAMD64MOVLconst { 23870 break 23871 } 23872 c := auxIntToInt32(v_1.AuxInt) 23873 v.reset(OpAMD64SHLQconst) 23874 v.AuxInt = int8ToAuxInt(int8(c & 63)) 23875 
v.AddArg(x) 23876 return true 23877 } 23878 // match: (SHLQ x (ADDQconst [c] y)) 23879 // cond: c & 63 == 0 23880 // result: (SHLQ x y) 23881 for { 23882 x := v_0 23883 if v_1.Op != OpAMD64ADDQconst { 23884 break 23885 } 23886 c := auxIntToInt32(v_1.AuxInt) 23887 y := v_1.Args[0] 23888 if !(c&63 == 0) { 23889 break 23890 } 23891 v.reset(OpAMD64SHLQ) 23892 v.AddArg2(x, y) 23893 return true 23894 } 23895 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y))) 23896 // cond: c & 63 == 0 23897 // result: (SHLQ x (NEGQ <t> y)) 23898 for { 23899 x := v_0 23900 if v_1.Op != OpAMD64NEGQ { 23901 break 23902 } 23903 t := v_1.Type 23904 v_1_0 := v_1.Args[0] 23905 if v_1_0.Op != OpAMD64ADDQconst { 23906 break 23907 } 23908 c := auxIntToInt32(v_1_0.AuxInt) 23909 y := v_1_0.Args[0] 23910 if !(c&63 == 0) { 23911 break 23912 } 23913 v.reset(OpAMD64SHLQ) 23914 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 23915 v0.AddArg(y) 23916 v.AddArg2(x, v0) 23917 return true 23918 } 23919 // match: (SHLQ x (ANDQconst [c] y)) 23920 // cond: c & 63 == 63 23921 // result: (SHLQ x y) 23922 for { 23923 x := v_0 23924 if v_1.Op != OpAMD64ANDQconst { 23925 break 23926 } 23927 c := auxIntToInt32(v_1.AuxInt) 23928 y := v_1.Args[0] 23929 if !(c&63 == 63) { 23930 break 23931 } 23932 v.reset(OpAMD64SHLQ) 23933 v.AddArg2(x, y) 23934 return true 23935 } 23936 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y))) 23937 // cond: c & 63 == 63 23938 // result: (SHLQ x (NEGQ <t> y)) 23939 for { 23940 x := v_0 23941 if v_1.Op != OpAMD64NEGQ { 23942 break 23943 } 23944 t := v_1.Type 23945 v_1_0 := v_1.Args[0] 23946 if v_1_0.Op != OpAMD64ANDQconst { 23947 break 23948 } 23949 c := auxIntToInt32(v_1_0.AuxInt) 23950 y := v_1_0.Args[0] 23951 if !(c&63 == 63) { 23952 break 23953 } 23954 v.reset(OpAMD64SHLQ) 23955 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 23956 v0.AddArg(y) 23957 v.AddArg2(x, v0) 23958 return true 23959 } 23960 // match: (SHLQ x (ADDLconst [c] y)) 23961 // cond: c & 63 == 0 23962 // result: (SHLQ x y) 23963 for { 23964 x := v_0 23965 if v_1.Op != OpAMD64ADDLconst { 23966 break 23967 } 23968 c := auxIntToInt32(v_1.AuxInt) 23969 y := v_1.Args[0] 23970 if !(c&63 == 0) { 23971 break 23972 } 23973 v.reset(OpAMD64SHLQ) 23974 v.AddArg2(x, y) 23975 return true 23976 } 23977 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y))) 23978 // cond: c & 63 == 0 23979 // result: (SHLQ x (NEGL <t> y)) 23980 for { 23981 x := v_0 23982 if v_1.Op != OpAMD64NEGL { 23983 break 23984 } 23985 t := v_1.Type 23986 v_1_0 := v_1.Args[0] 23987 if v_1_0.Op != OpAMD64ADDLconst { 23988 break 23989 } 23990 c := auxIntToInt32(v_1_0.AuxInt) 23991 y := v_1_0.Args[0] 23992 if !(c&63 == 0) { 23993 break 23994 } 23995 v.reset(OpAMD64SHLQ) 23996 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 23997 v0.AddArg(y) 23998 v.AddArg2(x, v0) 23999 return true 24000 } 24001 // match: (SHLQ x (ANDLconst [c] y)) 24002 // cond: c & 63 == 63 24003 // result: (SHLQ x y) 24004 for { 24005 x := v_0 24006 if v_1.Op != OpAMD64ANDLconst { 24007 break 24008 } 24009 c := auxIntToInt32(v_1.AuxInt) 24010 y := v_1.Args[0] 24011 if !(c&63 == 63) { 24012 break 24013 } 24014 v.reset(OpAMD64SHLQ) 24015 v.AddArg2(x, y) 24016 return true 24017 } 24018 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y))) 24019 // cond: c & 63 == 63 24020 // result: (SHLQ x (NEGL <t> y)) 24021 for { 24022 x := v_0 24023 if v_1.Op != OpAMD64NEGL { 24024 break 24025 } 24026 t := v_1.Type 24027 v_1_0 := v_1.Args[0] 24028 if v_1_0.Op != OpAMD64ANDLconst { 24029 break 24030 } 24031 c := auxIntToInt32(v_1_0.AuxInt) 24032 y := v_1_0.Args[0] 24033 if !(c&63 == 
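// Editorial note (not generated): masking the count with 63 matches the
// hardware, since a 64-bit shift consults only the low six bits of the
// count register, so folding the constant this way is semantics-preserving.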
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHLXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLQconst [1] (SHRQconst [1] x))
	// result: (BTRQconst [0] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
	// match: (SHLQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLQconst [d] (MOVQconst [c]))
	// result: (MOVQconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(c << uint64(d))
		return true
	}
	// match: (SHLQconst [d] (MOVLconst [c]))
	// result: (MOVQconst [int64(c) << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHLLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
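// When the shift count of an SHLX turns out to be constant, SHLXLload above
// and SHLXQload below drop back to the legacy immediate-count forms
// (SHLLconst/SHLQconst) wrapped around a plain load: a constant-count shift
// gains nothing from the variable-count BMI2 encoding.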
func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
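// The ADDQconst/ADDLconst and ANDQconst/ANDLconst shift-count rules below
// (and in SHLQ above) exploit the hardware masking of variable shift counts:
// 32-bit shifts use only the low 5 bits of the count register and 64-bit
// shifts only the low 6 bits, so adding a multiple of the operand width, or
// AND-ing with a mask that preserves those bits, cannot change the result.
// A sketch of Go source that produces such patterns (names illustrative only):
//
//	func rotl32(x uint32, k uint) uint32 {
//		return x<<(k&31) | x>>((32-k)&31) // both masks can be dropped
//	}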
func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRL x (MOVQconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHRXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRLconst [1] (SHLLconst [1] x))
	// result: (BTRLconst [31] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(31)
		v.AddArg(x)
		return true
	}
	// match: (SHRLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
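// (SHRLconst [1] (SHLLconst [1] x)) keeps only the low 31 bits of x, i.e. it
// clears bit 31, which is exactly what BTRLconst [31] does in one
// instruction; SHLQconst and SHRQconst carry the analogous BTRQconst rules
// for bit 0 and bit 63 of a 64-bit value.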
func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRQ x (MOVQconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHRXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
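// The SHLXQload/SHRXLload/SHRXQload load merges above are gated on
// buildcfg.GOAMD64 >= 3 because SHLX/SHRX are BMI2 instructions, which the
// GOAMD64=v3 baseline guarantees.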
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRQconst [1] (SHLQconst [1] x))
	// result: (BTRQconst [63] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(63)
		v.AddArg(x)
		return true
	}
	// match: (SHRQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
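// The byte and word shifts still mask their count to 5 bits like a 32-bit
// shift, so the SHRB/SHRW rules above split on the masked count: a count of
// 8..31 (resp. 16..31) shifts out every bit of the narrow operand and the
// result folds to a constant 0, while smaller counts become
// SHRBconst/SHRWconst.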
func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBL x (MOVLconst [c]))
	// result: (SUBLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBLload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// result: (ADDLconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
}
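// SUBLconst needs no guard when negating its constant: the final rule always
// fires, rewriting x-c as x+(-c), and -c wrapping at -1<<31 is harmless
// because ADDLconst is a 32-bit operation. That is also why the function ends
// without a return false: the closing for block never breaks, so the
// generator can omit the unreachable return.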
func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (SUBL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
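// The ADDQconst and LEAQ address-folding pairs seen in SUBLload/SUBLmodify
// recur for every load and modify op below: a constant pointer adjustment
// folds into the instruction offset when the sum still fits in 32 bits
// (is32Bit), and a LEAQ folds in when its symbol can be merged with the
// existing one (canMergeSym/mergeSym).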
func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBQload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQborrow x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconstborrow x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// result: (MOVQconst [d-int64(c)])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d - int64(c))
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(int64(-c)-int64(d))
	// result: (ADDQconst [-c-d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(-c) - int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c - d)
		v.AddArg(x)
		return true
	}
	return false
}
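// Unlike SUBLconst, SUBQconst must keep the c != -(1<<31) guard: the AuxInt
// is only 32 bits wide, and negating -1<<31 cannot be represented as an int32
// ADDQconst without changing the 64-bit result.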
func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
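// The MOVSDstore pattern in SUBQload (and the MOVSSstore one in SUBLload
// above) is store-to-load forwarding across register classes: if the memory
// operand was just stored from an XMM register, the reload is replaced by a
// direct MOVQf2i/MOVLf2i register move of the stored value.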
func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
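// The float rules are the mirror image: SUBSDload forwards a just-stored
// integer value into the float domain via MOVQi2f instead of reloading it,
// and only the second operand of SUBSD/SUBSS can merge a load, since
// subtraction is not commutative and the operands cannot be swapped.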
func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
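// The TEST* matchers below wrap each rule in
//
//	for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ... }
//
// which tries the pattern with the two arguments in both orders, since TEST
// is commutative; continue moves on to the swapped attempt, and the outer
// break gives up on the rule after both orders fail.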
func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTB (MOVLconst [c]) x)
	// result: (TESTBconst [int8(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTBconst)
			v.AuxInt = int8ToAuxInt(int8(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVBload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTBconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTL (MOVLconst [c]) x)
	// result: (TESTLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDLload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTL)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
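// (TESTx l l) with l.Uses == 2 means the load is consumed only by this
// compare (both uses are the two TEST arguments), so it is safe to clobber l
// and test memory directly with CMPxconstload against 0; the @l.Block result
// re-homes the new value in the load's block.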
func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTLconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64TESTQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDQload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTQ)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
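// The Flag* results below materialize statically known comparison outcomes:
// FlagEQ for equal/zero, FlagLT_UGT for "signed less, unsigned greater", and
// FlagGT_UGT for "signed greater, unsigned greater" -- a TEST of a negative
// constant against itself sets the sign flag while remaining nonzero (hence
// greater) as an unsigned value.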
func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTQconst [-1] x)
	// cond: x.Op != OpAMD64MOVQconst
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVQconst) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTW (MOVLconst [c]) x)
	// result: (TESTWconst [int16(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTWconst)
			v.AuxInt = int16ToAuxInt(int16(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVWload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTWconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	return false
}
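// The TESTxconst [-1] x => TESTx x x rules trade an immediate encoding for a
// shorter register-register test; the x.Op != OpAMD64MOVLconst (or MOVQconst)
// guard stops the rewrite from ping-ponging with the rules that fold a
// constant argument back into TESTxconst.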
func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
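// XCHGL/XCHGQ fold address arithmetic like ordinary stores, but the LEAQ case
// additionally requires ptr.Op != OpSB, presumably to keep the atomic
// exchange from addressing directly off the static base pseudo-register.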
func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTCL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCLconst [int8(log32(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTCLconst)
			v.AuxInt = int8ToAuxInt(int8(log32(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL x (MOVLconst [c]))
	// result: (XORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64XORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (XORL x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSMSKL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSMSKL)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
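// XOR-specific strength reductions: x ^ (1 << y) becomes the BTCL
// bit-complement instruction, a constant power of two >= 128 becomes
// BTCLconst (below 128 an imm8 XOR is already at least as short), and for
// GOAMD64 >= 3, x ^ (x-1) becomes BLSMSKL (BMI1), which masks up through the
// lowest set bit. XORLconst [1] applied to a SETcc flips the condition to its
// negation, as the next function's rules enumerate.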
func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORLconst [c] x)
	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCLconst [int8(log32(c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c)))
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETNE x))
	// result: (SETEQ x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// result: (SETNE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// result: (SETGE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// result: (SETL x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// result: (SETG x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// result: (SETLE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// result: (SETAE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// result: (SETB x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// result: (SETA x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// result: (SETBE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (XORLconst [d] x))
	// result: (XORLconst [c ^ d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (BTCLconst [d] x))
	// result: (XORLconst [c ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c^d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
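// The XORLload rules below fold address arithmetic into the load side of a
// read-modify op; the final rule forwards a just-stored SSE value: when the
// loaded word was written by a MOVSSstore to the same address, the load is
// replaced by a register-to-register MOVLf2i bit copy.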
func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (XORL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64XORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTCQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCQconst [int8(log64(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTCQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64XORQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (XORQ x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSMSKQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSMSKQ)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
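// XORQconst mirrors the 32-bit rules above; note that the BTCQconst fold
// below must re-check is32Bit when toggling bit d, because c ^ 1<<d may no
// longer fit in the signed 32-bit immediate a XORQconst accepts.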
func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORQconst [c] x)
	// cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCQconst [int8(log32(c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c)))
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (XORQconst [d] x))
	// result: (XORQconst [c ^ d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (BTCQconst [d] x))
	// cond: is32Bit(int64(c) ^ 1<<uint32(d))
	// result: (XORQconst [c ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)^d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) ^ d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (XORQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64XORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Addr {sym} base)
	// result: (LEAQ {sym} base)
	for {
		sym := auxToSym(v.Aux)
		base := v_0
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicAdd32 ptr val mem)
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64AddTupleFirst32)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg2(val, v0)
		return true
	}
}
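// Like AtomicAdd32 above, AtomicAdd64 lowers to a locked XADD, which yields
// the *old* memory value; the AddTupleFirst wrapper re-adds val to the
// tuple's first element so the visible result is the new value.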
func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicAdd64 ptr val mem)
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64AddTupleFirst64)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg2(val, v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicAnd32 ptr val mem)
	// result: (ANDLlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ANDLlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicAnd8 ptr val mem)
	// result: (ANDBlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ANDBlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		ptr := v_0
		old := v_1
		new_ := v_2
		mem := v_3
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		ptr := v_0
		old := v_1
		new_ := v_2
		mem := v_3
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicExchange32 ptr val mem)
	// result: (XCHGL val ptr mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64XCHGL)
		v.AddArg3(val, ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicExchange64 ptr val mem)
	// result: (XCHGQ val ptr mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64XCHGQ)
		v.AddArg3(val, ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad32 ptr mem)
	// result: (MOVLatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad64 ptr mem)
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
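// On amd64, naturally aligned loads are already atomic, so the AtomicLoad*
// ops lower to plain MOV*atomicload instructions with no lock prefix.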
func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad8 ptr mem)
	// result: (MOVBatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoadPtr ptr mem)
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicOr32 ptr val mem)
	// result: (ORLlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ORLlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicOr8 ptr val mem)
	// result: (ORBlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ORBlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore32 ptr val mem)
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore64 ptr val mem)
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore8 ptr val mem)
	// result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStorePtrNoWB ptr val mem)
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
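// The BitLen* lowerings come in two flavors: on GOAMD64 >= 3 they use LZCNT,
// which is well defined on a zero input, while the baseline versions either
// pad the input so BSR never sees an all-zero operand (2*x+1 via LEA) or
// patch the result with a CMOV.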
func rewriteValueAMD64_OpBitLen16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen16 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen16 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpBitLen32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen32 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
		v1.AuxInt = int32ToAuxInt(1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v2.AddArg(x)
		v1.AddArg2(v2, v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen32 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
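// For BitLen64 there is no spare bit to seed, so the baseline rule keeps
// BSRQ's flags result and uses CMOVQEQ to substitute -1 when the input is
// zero, which makes BitLen64(0) come out as 0 after the +1 adjustment.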
func rewriteValueAMD64_OpBitLen64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen64 <t> x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = int64ToAuxInt(-1)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4.AddArg(v2)
		v0.AddArg3(v1, v3, v4)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen64 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-64)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpBitLen8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen8 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen8 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCeil(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ceil x)
	// result: (ROUNDSD [2] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(2)
		v.AddArg(x)
		return true
	}
}
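// CondSelect is lowered by pattern-matching the SETcc that produced the
// condition: each comparison kind maps to the corresponding CMOVQ*/CMOVL*/
// CMOVW* op, chosen by the width of the selected type (64/32/16 bit).
// Conditions that arrive as a materialized boolean rather than flags are
// handled by the rules at the end of this function, which zero-extend the
// check value and compare it against zero.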
func rewriteValueAMD64_OpCondSelect(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is32BitInt(t)
	// result: (CMOVLHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is16BitInt(t)
	// result: (CMOVWHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 1
	// result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 2
	// result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 4
	// result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
	// result: (CMOVLNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
	// result: (CMOVWNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpConst16(v *Value) bool {
	// match: (Const16 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt16(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConst8(v *Value) bool {
	// match: (Const8 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstBool(v *Value) bool {
	// match: (ConstBool [c])
	// result: (MOVLconst [b2i32(c)])
	for {
		c := auxIntToBool(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(b2i32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstNil(v *Value) bool {
	// match: (ConstNil )
	// result: (MOVQconst [0])
	for {
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz16 x)
	// result: (BSFL (BTSLconst <typ.UInt32> [16] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = int8ToAuxInt(16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
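// Baseline Ctz32 sets bit 32 of a widened operand before the BSFQ, so the
// scan always finds a set bit and yields exactly 32 for a zero input without
// needing a CMOV; on GOAMD64 >= 3 TZCNT handles zero directly.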
func rewriteValueAMD64_OpCtz32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
		v1.AuxInt = int8ToAuxInt(32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64 <t> x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v3.AddArg(v1)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ x))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz8 x)
	// result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = int8ToAuxInt(8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
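// The *NonZero variants may assume the input has a set bit, so no padding
// bit is needed: a plain BSF (or TZCNT on GOAMD64 >= 3) is already correct.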
28619 for { 28620 x := v_0 28621 if !(buildcfg.GOAMD64 < 3) { 28622 break 28623 } 28624 v.reset(OpAMD64BSFL) 28625 v.AddArg(x) 28626 return true 28627 } 28628 return false 28629 } 28630 func rewriteValueAMD64_OpDiv16(v *Value) bool { 28631 v_1 := v.Args[1] 28632 v_0 := v.Args[0] 28633 b := v.Block 28634 typ := &b.Func.Config.Types 28635 // match: (Div16 [a] x y) 28636 // result: (Select0 (DIVW [a] x y)) 28637 for { 28638 a := auxIntToBool(v.AuxInt) 28639 x := v_0 28640 y := v_1 28641 v.reset(OpSelect0) 28642 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 28643 v0.AuxInt = boolToAuxInt(a) 28644 v0.AddArg2(x, y) 28645 v.AddArg(v0) 28646 return true 28647 } 28648 } 28649 func rewriteValueAMD64_OpDiv16u(v *Value) bool { 28650 v_1 := v.Args[1] 28651 v_0 := v.Args[0] 28652 b := v.Block 28653 typ := &b.Func.Config.Types 28654 // match: (Div16u x y) 28655 // result: (Select0 (DIVWU x y)) 28656 for { 28657 x := v_0 28658 y := v_1 28659 v.reset(OpSelect0) 28660 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 28661 v0.AddArg2(x, y) 28662 v.AddArg(v0) 28663 return true 28664 } 28665 } 28666 func rewriteValueAMD64_OpDiv32(v *Value) bool { 28667 v_1 := v.Args[1] 28668 v_0 := v.Args[0] 28669 b := v.Block 28670 typ := &b.Func.Config.Types 28671 // match: (Div32 [a] x y) 28672 // result: (Select0 (DIVL [a] x y)) 28673 for { 28674 a := auxIntToBool(v.AuxInt) 28675 x := v_0 28676 y := v_1 28677 v.reset(OpSelect0) 28678 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 28679 v0.AuxInt = boolToAuxInt(a) 28680 v0.AddArg2(x, y) 28681 v.AddArg(v0) 28682 return true 28683 } 28684 } 28685 func rewriteValueAMD64_OpDiv32u(v *Value) bool { 28686 v_1 := v.Args[1] 28687 v_0 := v.Args[0] 28688 b := v.Block 28689 typ := &b.Func.Config.Types 28690 // match: (Div32u x y) 28691 // result: (Select0 (DIVLU x y)) 28692 for { 28693 x := v_0 28694 y := v_1 28695 v.reset(OpSelect0) 28696 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 28697 v0.AddArg2(x, y) 28698 v.AddArg(v0) 28699 return true 28700 } 28701 } 28702 func rewriteValueAMD64_OpDiv64(v *Value) bool { 28703 v_1 := v.Args[1] 28704 v_0 := v.Args[0] 28705 b := v.Block 28706 typ := &b.Func.Config.Types 28707 // match: (Div64 [a] x y) 28708 // result: (Select0 (DIVQ [a] x y)) 28709 for { 28710 a := auxIntToBool(v.AuxInt) 28711 x := v_0 28712 y := v_1 28713 v.reset(OpSelect0) 28714 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 28715 v0.AuxInt = boolToAuxInt(a) 28716 v0.AddArg2(x, y) 28717 v.AddArg(v0) 28718 return true 28719 } 28720 } 28721 func rewriteValueAMD64_OpDiv64u(v *Value) bool { 28722 v_1 := v.Args[1] 28723 v_0 := v.Args[0] 28724 b := v.Block 28725 typ := &b.Func.Config.Types 28726 // match: (Div64u x y) 28727 // result: (Select0 (DIVQU x y)) 28728 for { 28729 x := v_0 28730 y := v_1 28731 v.reset(OpSelect0) 28732 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 28733 v0.AddArg2(x, y) 28734 v.AddArg(v0) 28735 return true 28736 } 28737 } 28738 func rewriteValueAMD64_OpDiv8(v *Value) bool { 28739 v_1 := v.Args[1] 28740 v_0 := v.Args[0] 28741 b := v.Block 28742 typ := &b.Func.Config.Types 28743 // match: (Div8 x y) 28744 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 28745 for { 28746 x := v_0 28747 y := v_1 28748 v.reset(OpSelect0) 28749 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 28750 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 28751 
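// The Div lowerings above all funnel into a single tuple-producing
// hardware divide (DIVW/DIVL/DIVQ and their unsigned U forms): Select0
// picks the quotient half of the tuple, and the Mod rules later in this
// file pick the remainder with Select1. Div8 and Div8u are widened to
// 16 bits first, avoiding the byte divide's awkward AH/AL result
// registers. A reference model of Div8, assuming Go's / operator
// matches the signed quotient DIVW produces (sketch only):
//
//	func div8(x, y int8) int8 {
//		q := int16(x) / int16(y) // SignExt8to16 on both operands, DIVW
//		return int8(q)           // Select0: the quotient half
//	}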
v1.AddArg(x) 28752 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 28753 v2.AddArg(y) 28754 v0.AddArg2(v1, v2) 28755 v.AddArg(v0) 28756 return true 28757 } 28758 } 28759 func rewriteValueAMD64_OpDiv8u(v *Value) bool { 28760 v_1 := v.Args[1] 28761 v_0 := v.Args[0] 28762 b := v.Block 28763 typ := &b.Func.Config.Types 28764 // match: (Div8u x y) 28765 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 28766 for { 28767 x := v_0 28768 y := v_1 28769 v.reset(OpSelect0) 28770 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 28771 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 28772 v1.AddArg(x) 28773 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 28774 v2.AddArg(y) 28775 v0.AddArg2(v1, v2) 28776 v.AddArg(v0) 28777 return true 28778 } 28779 } 28780 func rewriteValueAMD64_OpEq16(v *Value) bool { 28781 v_1 := v.Args[1] 28782 v_0 := v.Args[0] 28783 b := v.Block 28784 // match: (Eq16 x y) 28785 // result: (SETEQ (CMPW x y)) 28786 for { 28787 x := v_0 28788 y := v_1 28789 v.reset(OpAMD64SETEQ) 28790 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 28791 v0.AddArg2(x, y) 28792 v.AddArg(v0) 28793 return true 28794 } 28795 } 28796 func rewriteValueAMD64_OpEq32(v *Value) bool { 28797 v_1 := v.Args[1] 28798 v_0 := v.Args[0] 28799 b := v.Block 28800 // match: (Eq32 x y) 28801 // result: (SETEQ (CMPL x y)) 28802 for { 28803 x := v_0 28804 y := v_1 28805 v.reset(OpAMD64SETEQ) 28806 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 28807 v0.AddArg2(x, y) 28808 v.AddArg(v0) 28809 return true 28810 } 28811 } 28812 func rewriteValueAMD64_OpEq32F(v *Value) bool { 28813 v_1 := v.Args[1] 28814 v_0 := v.Args[0] 28815 b := v.Block 28816 // match: (Eq32F x y) 28817 // result: (SETEQF (UCOMISS x y)) 28818 for { 28819 x := v_0 28820 y := v_1 28821 v.reset(OpAMD64SETEQF) 28822 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 28823 v0.AddArg2(x, y) 28824 v.AddArg(v0) 28825 return true 28826 } 28827 } 28828 func rewriteValueAMD64_OpEq64(v *Value) bool { 28829 v_1 := v.Args[1] 28830 v_0 := v.Args[0] 28831 b := v.Block 28832 // match: (Eq64 x y) 28833 // result: (SETEQ (CMPQ x y)) 28834 for { 28835 x := v_0 28836 y := v_1 28837 v.reset(OpAMD64SETEQ) 28838 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 28839 v0.AddArg2(x, y) 28840 v.AddArg(v0) 28841 return true 28842 } 28843 } 28844 func rewriteValueAMD64_OpEq64F(v *Value) bool { 28845 v_1 := v.Args[1] 28846 v_0 := v.Args[0] 28847 b := v.Block 28848 // match: (Eq64F x y) 28849 // result: (SETEQF (UCOMISD x y)) 28850 for { 28851 x := v_0 28852 y := v_1 28853 v.reset(OpAMD64SETEQF) 28854 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 28855 v0.AddArg2(x, y) 28856 v.AddArg(v0) 28857 return true 28858 } 28859 } 28860 func rewriteValueAMD64_OpEq8(v *Value) bool { 28861 v_1 := v.Args[1] 28862 v_0 := v.Args[0] 28863 b := v.Block 28864 // match: (Eq8 x y) 28865 // result: (SETEQ (CMPB x y)) 28866 for { 28867 x := v_0 28868 y := v_1 28869 v.reset(OpAMD64SETEQ) 28870 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 28871 v0.AddArg2(x, y) 28872 v.AddArg(v0) 28873 return true 28874 } 28875 } 28876 func rewriteValueAMD64_OpEqB(v *Value) bool { 28877 v_1 := v.Args[1] 28878 v_0 := v.Args[0] 28879 b := v.Block 28880 // match: (EqB x y) 28881 // result: (SETEQ (CMPB x y)) 28882 for { 28883 x := v_0 28884 y := v_1 28885 v.reset(OpAMD64SETEQ) 28886 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 28887 v0.AddArg2(x, y) 28888 v.AddArg(v0) 28889 return true 28890 } 28891 } 28892 func 
rewriteValueAMD64_OpEqPtr(v *Value) bool { 28893 v_1 := v.Args[1] 28894 v_0 := v.Args[0] 28895 b := v.Block 28896 // match: (EqPtr x y) 28897 // result: (SETEQ (CMPQ x y)) 28898 for { 28899 x := v_0 28900 y := v_1 28901 v.reset(OpAMD64SETEQ) 28902 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 28903 v0.AddArg2(x, y) 28904 v.AddArg(v0) 28905 return true 28906 } 28907 } 28908 func rewriteValueAMD64_OpFMA(v *Value) bool { 28909 v_2 := v.Args[2] 28910 v_1 := v.Args[1] 28911 v_0 := v.Args[0] 28912 // match: (FMA x y z) 28913 // result: (VFMADD231SD z x y) 28914 for { 28915 x := v_0 28916 y := v_1 28917 z := v_2 28918 v.reset(OpAMD64VFMADD231SD) 28919 v.AddArg3(z, x, y) 28920 return true 28921 } 28922 } 28923 func rewriteValueAMD64_OpFloor(v *Value) bool { 28924 v_0 := v.Args[0] 28925 // match: (Floor x) 28926 // result: (ROUNDSD [1] x) 28927 for { 28928 x := v_0 28929 v.reset(OpAMD64ROUNDSD) 28930 v.AuxInt = int8ToAuxInt(1) 28931 v.AddArg(x) 28932 return true 28933 } 28934 } 28935 func rewriteValueAMD64_OpGetG(v *Value) bool { 28936 v_0 := v.Args[0] 28937 // match: (GetG mem) 28938 // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal 28939 // result: (LoweredGetG mem) 28940 for { 28941 mem := v_0 28942 if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { 28943 break 28944 } 28945 v.reset(OpAMD64LoweredGetG) 28946 v.AddArg(mem) 28947 return true 28948 } 28949 return false 28950 } 28951 func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { 28952 b := v.Block 28953 typ := &b.Func.Config.Types 28954 // match: (HasCPUFeature {s}) 28955 // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) 28956 for { 28957 s := auxToSym(v.Aux) 28958 v.reset(OpAMD64SETNE) 28959 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 28960 v0.AuxInt = int32ToAuxInt(0) 28961 v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) 28962 v1.Aux = symToAux(s) 28963 v0.AddArg(v1) 28964 v.AddArg(v0) 28965 return true 28966 } 28967 } 28968 func rewriteValueAMD64_OpIsInBounds(v *Value) bool { 28969 v_1 := v.Args[1] 28970 v_0 := v.Args[0] 28971 b := v.Block 28972 // match: (IsInBounds idx len) 28973 // result: (SETB (CMPQ idx len)) 28974 for { 28975 idx := v_0 28976 len := v_1 28977 v.reset(OpAMD64SETB) 28978 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 28979 v0.AddArg2(idx, len) 28980 v.AddArg(v0) 28981 return true 28982 } 28983 } 28984 func rewriteValueAMD64_OpIsNonNil(v *Value) bool { 28985 v_0 := v.Args[0] 28986 b := v.Block 28987 // match: (IsNonNil p) 28988 // result: (SETNE (TESTQ p p)) 28989 for { 28990 p := v_0 28991 v.reset(OpAMD64SETNE) 28992 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) 28993 v0.AddArg2(p, p) 28994 v.AddArg(v0) 28995 return true 28996 } 28997 } 28998 func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { 28999 v_1 := v.Args[1] 29000 v_0 := v.Args[0] 29001 b := v.Block 29002 // match: (IsSliceInBounds idx len) 29003 // result: (SETBE (CMPQ idx len)) 29004 for { 29005 idx := v_0 29006 len := v_1 29007 v.reset(OpAMD64SETBE) 29008 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 29009 v0.AddArg2(idx, len) 29010 v.AddArg(v0) 29011 return true 29012 } 29013 } 29014 func rewriteValueAMD64_OpLeq16(v *Value) bool { 29015 v_1 := v.Args[1] 29016 v_0 := v.Args[0] 29017 b := v.Block 29018 // match: (Leq16 x y) 29019 // result: (SETLE (CMPW x y)) 29020 for { 29021 x := v_0 29022 y := v_1 29023 v.reset(OpAMD64SETLE) 29024 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 29025 v0.AddArg2(x, y) 29026 v.AddArg(v0) 29027 return true 29028 } 
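// The Eq* rules above and the Leq*/Less* rules that follow all lower to
// one shape: a flags-producing CMPB/CMPW/CMPL/CMPQ (UCOMISS/UCOMISD for
// floats) consumed by a SETcc. Signed orderings use SETL/SETLE,
// unsigned ones SETB/SETBE, and the float rules swap their operands
// (Leq32F becomes SETGEF (UCOMISS y x)) so that a NaN on either side
// comes out false. IsNonNil needs no constant at all: TESTQ p p sets ZF
// exactly when p == 0. The bounds checks reuse the unsigned condition
// codes because, assuming len >= 0, one unsigned compare also rejects
// negative indexes (sketch with a hypothetical helper):
//
//	func inBounds(idx, length int64) bool {
//		// (IsInBounds idx len) => (SETB (CMPQ idx len)): the unsigned
//		// view of a negative idx is huge, so it fails the same test
//		// that catches idx >= length.
//		return uint64(idx) < uint64(length)
//	}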
29029 } 29030 func rewriteValueAMD64_OpLeq16U(v *Value) bool { 29031 v_1 := v.Args[1] 29032 v_0 := v.Args[0] 29033 b := v.Block 29034 // match: (Leq16U x y) 29035 // result: (SETBE (CMPW x y)) 29036 for { 29037 x := v_0 29038 y := v_1 29039 v.reset(OpAMD64SETBE) 29040 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 29041 v0.AddArg2(x, y) 29042 v.AddArg(v0) 29043 return true 29044 } 29045 } 29046 func rewriteValueAMD64_OpLeq32(v *Value) bool { 29047 v_1 := v.Args[1] 29048 v_0 := v.Args[0] 29049 b := v.Block 29050 // match: (Leq32 x y) 29051 // result: (SETLE (CMPL x y)) 29052 for { 29053 x := v_0 29054 y := v_1 29055 v.reset(OpAMD64SETLE) 29056 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 29057 v0.AddArg2(x, y) 29058 v.AddArg(v0) 29059 return true 29060 } 29061 } 29062 func rewriteValueAMD64_OpLeq32F(v *Value) bool { 29063 v_1 := v.Args[1] 29064 v_0 := v.Args[0] 29065 b := v.Block 29066 // match: (Leq32F x y) 29067 // result: (SETGEF (UCOMISS y x)) 29068 for { 29069 x := v_0 29070 y := v_1 29071 v.reset(OpAMD64SETGEF) 29072 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 29073 v0.AddArg2(y, x) 29074 v.AddArg(v0) 29075 return true 29076 } 29077 } 29078 func rewriteValueAMD64_OpLeq32U(v *Value) bool { 29079 v_1 := v.Args[1] 29080 v_0 := v.Args[0] 29081 b := v.Block 29082 // match: (Leq32U x y) 29083 // result: (SETBE (CMPL x y)) 29084 for { 29085 x := v_0 29086 y := v_1 29087 v.reset(OpAMD64SETBE) 29088 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 29089 v0.AddArg2(x, y) 29090 v.AddArg(v0) 29091 return true 29092 } 29093 } 29094 func rewriteValueAMD64_OpLeq64(v *Value) bool { 29095 v_1 := v.Args[1] 29096 v_0 := v.Args[0] 29097 b := v.Block 29098 // match: (Leq64 x y) 29099 // result: (SETLE (CMPQ x y)) 29100 for { 29101 x := v_0 29102 y := v_1 29103 v.reset(OpAMD64SETLE) 29104 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 29105 v0.AddArg2(x, y) 29106 v.AddArg(v0) 29107 return true 29108 } 29109 } 29110 func rewriteValueAMD64_OpLeq64F(v *Value) bool { 29111 v_1 := v.Args[1] 29112 v_0 := v.Args[0] 29113 b := v.Block 29114 // match: (Leq64F x y) 29115 // result: (SETGEF (UCOMISD y x)) 29116 for { 29117 x := v_0 29118 y := v_1 29119 v.reset(OpAMD64SETGEF) 29120 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 29121 v0.AddArg2(y, x) 29122 v.AddArg(v0) 29123 return true 29124 } 29125 } 29126 func rewriteValueAMD64_OpLeq64U(v *Value) bool { 29127 v_1 := v.Args[1] 29128 v_0 := v.Args[0] 29129 b := v.Block 29130 // match: (Leq64U x y) 29131 // result: (SETBE (CMPQ x y)) 29132 for { 29133 x := v_0 29134 y := v_1 29135 v.reset(OpAMD64SETBE) 29136 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 29137 v0.AddArg2(x, y) 29138 v.AddArg(v0) 29139 return true 29140 } 29141 } 29142 func rewriteValueAMD64_OpLeq8(v *Value) bool { 29143 v_1 := v.Args[1] 29144 v_0 := v.Args[0] 29145 b := v.Block 29146 // match: (Leq8 x y) 29147 // result: (SETLE (CMPB x y)) 29148 for { 29149 x := v_0 29150 y := v_1 29151 v.reset(OpAMD64SETLE) 29152 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 29153 v0.AddArg2(x, y) 29154 v.AddArg(v0) 29155 return true 29156 } 29157 } 29158 func rewriteValueAMD64_OpLeq8U(v *Value) bool { 29159 v_1 := v.Args[1] 29160 v_0 := v.Args[0] 29161 b := v.Block 29162 // match: (Leq8U x y) 29163 // result: (SETBE (CMPB x y)) 29164 for { 29165 x := v_0 29166 y := v_1 29167 v.reset(OpAMD64SETBE) 29168 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 29169 v0.AddArg2(x, y) 29170 v.AddArg(v0) 29171 return true 29172 } 29173 } 29174 func 
rewriteValueAMD64_OpLess16(v *Value) bool { 29175 v_1 := v.Args[1] 29176 v_0 := v.Args[0] 29177 b := v.Block 29178 // match: (Less16 x y) 29179 // result: (SETL (CMPW x y)) 29180 for { 29181 x := v_0 29182 y := v_1 29183 v.reset(OpAMD64SETL) 29184 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 29185 v0.AddArg2(x, y) 29186 v.AddArg(v0) 29187 return true 29188 } 29189 } 29190 func rewriteValueAMD64_OpLess16U(v *Value) bool { 29191 v_1 := v.Args[1] 29192 v_0 := v.Args[0] 29193 b := v.Block 29194 // match: (Less16U x y) 29195 // result: (SETB (CMPW x y)) 29196 for { 29197 x := v_0 29198 y := v_1 29199 v.reset(OpAMD64SETB) 29200 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 29201 v0.AddArg2(x, y) 29202 v.AddArg(v0) 29203 return true 29204 } 29205 } 29206 func rewriteValueAMD64_OpLess32(v *Value) bool { 29207 v_1 := v.Args[1] 29208 v_0 := v.Args[0] 29209 b := v.Block 29210 // match: (Less32 x y) 29211 // result: (SETL (CMPL x y)) 29212 for { 29213 x := v_0 29214 y := v_1 29215 v.reset(OpAMD64SETL) 29216 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 29217 v0.AddArg2(x, y) 29218 v.AddArg(v0) 29219 return true 29220 } 29221 } 29222 func rewriteValueAMD64_OpLess32F(v *Value) bool { 29223 v_1 := v.Args[1] 29224 v_0 := v.Args[0] 29225 b := v.Block 29226 // match: (Less32F x y) 29227 // result: (SETGF (UCOMISS y x)) 29228 for { 29229 x := v_0 29230 y := v_1 29231 v.reset(OpAMD64SETGF) 29232 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 29233 v0.AddArg2(y, x) 29234 v.AddArg(v0) 29235 return true 29236 } 29237 } 29238 func rewriteValueAMD64_OpLess32U(v *Value) bool { 29239 v_1 := v.Args[1] 29240 v_0 := v.Args[0] 29241 b := v.Block 29242 // match: (Less32U x y) 29243 // result: (SETB (CMPL x y)) 29244 for { 29245 x := v_0 29246 y := v_1 29247 v.reset(OpAMD64SETB) 29248 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 29249 v0.AddArg2(x, y) 29250 v.AddArg(v0) 29251 return true 29252 } 29253 } 29254 func rewriteValueAMD64_OpLess64(v *Value) bool { 29255 v_1 := v.Args[1] 29256 v_0 := v.Args[0] 29257 b := v.Block 29258 // match: (Less64 x y) 29259 // result: (SETL (CMPQ x y)) 29260 for { 29261 x := v_0 29262 y := v_1 29263 v.reset(OpAMD64SETL) 29264 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 29265 v0.AddArg2(x, y) 29266 v.AddArg(v0) 29267 return true 29268 } 29269 } 29270 func rewriteValueAMD64_OpLess64F(v *Value) bool { 29271 v_1 := v.Args[1] 29272 v_0 := v.Args[0] 29273 b := v.Block 29274 // match: (Less64F x y) 29275 // result: (SETGF (UCOMISD y x)) 29276 for { 29277 x := v_0 29278 y := v_1 29279 v.reset(OpAMD64SETGF) 29280 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 29281 v0.AddArg2(y, x) 29282 v.AddArg(v0) 29283 return true 29284 } 29285 } 29286 func rewriteValueAMD64_OpLess64U(v *Value) bool { 29287 v_1 := v.Args[1] 29288 v_0 := v.Args[0] 29289 b := v.Block 29290 // match: (Less64U x y) 29291 // result: (SETB (CMPQ x y)) 29292 for { 29293 x := v_0 29294 y := v_1 29295 v.reset(OpAMD64SETB) 29296 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 29297 v0.AddArg2(x, y) 29298 v.AddArg(v0) 29299 return true 29300 } 29301 } 29302 func rewriteValueAMD64_OpLess8(v *Value) bool { 29303 v_1 := v.Args[1] 29304 v_0 := v.Args[0] 29305 b := v.Block 29306 // match: (Less8 x y) 29307 // result: (SETL (CMPB x y)) 29308 for { 29309 x := v_0 29310 y := v_1 29311 v.reset(OpAMD64SETL) 29312 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 29313 v0.AddArg2(x, y) 29314 v.AddArg(v0) 29315 return true 29316 } 29317 } 29318 func 
rewriteValueAMD64_OpLess8U(v *Value) bool { 29319 v_1 := v.Args[1] 29320 v_0 := v.Args[0] 29321 b := v.Block 29322 // match: (Less8U x y) 29323 // result: (SETB (CMPB x y)) 29324 for { 29325 x := v_0 29326 y := v_1 29327 v.reset(OpAMD64SETB) 29328 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 29329 v0.AddArg2(x, y) 29330 v.AddArg(v0) 29331 return true 29332 } 29333 } 29334 func rewriteValueAMD64_OpLoad(v *Value) bool { 29335 v_1 := v.Args[1] 29336 v_0 := v.Args[0] 29337 // match: (Load <t> ptr mem) 29338 // cond: (is64BitInt(t) || isPtr(t)) 29339 // result: (MOVQload ptr mem) 29340 for { 29341 t := v.Type 29342 ptr := v_0 29343 mem := v_1 29344 if !(is64BitInt(t) || isPtr(t)) { 29345 break 29346 } 29347 v.reset(OpAMD64MOVQload) 29348 v.AddArg2(ptr, mem) 29349 return true 29350 } 29351 // match: (Load <t> ptr mem) 29352 // cond: is32BitInt(t) 29353 // result: (MOVLload ptr mem) 29354 for { 29355 t := v.Type 29356 ptr := v_0 29357 mem := v_1 29358 if !(is32BitInt(t)) { 29359 break 29360 } 29361 v.reset(OpAMD64MOVLload) 29362 v.AddArg2(ptr, mem) 29363 return true 29364 } 29365 // match: (Load <t> ptr mem) 29366 // cond: is16BitInt(t) 29367 // result: (MOVWload ptr mem) 29368 for { 29369 t := v.Type 29370 ptr := v_0 29371 mem := v_1 29372 if !(is16BitInt(t)) { 29373 break 29374 } 29375 v.reset(OpAMD64MOVWload) 29376 v.AddArg2(ptr, mem) 29377 return true 29378 } 29379 // match: (Load <t> ptr mem) 29380 // cond: (t.IsBoolean() || is8BitInt(t)) 29381 // result: (MOVBload ptr mem) 29382 for { 29383 t := v.Type 29384 ptr := v_0 29385 mem := v_1 29386 if !(t.IsBoolean() || is8BitInt(t)) { 29387 break 29388 } 29389 v.reset(OpAMD64MOVBload) 29390 v.AddArg2(ptr, mem) 29391 return true 29392 } 29393 // match: (Load <t> ptr mem) 29394 // cond: is32BitFloat(t) 29395 // result: (MOVSSload ptr mem) 29396 for { 29397 t := v.Type 29398 ptr := v_0 29399 mem := v_1 29400 if !(is32BitFloat(t)) { 29401 break 29402 } 29403 v.reset(OpAMD64MOVSSload) 29404 v.AddArg2(ptr, mem) 29405 return true 29406 } 29407 // match: (Load <t> ptr mem) 29408 // cond: is64BitFloat(t) 29409 // result: (MOVSDload ptr mem) 29410 for { 29411 t := v.Type 29412 ptr := v_0 29413 mem := v_1 29414 if !(is64BitFloat(t)) { 29415 break 29416 } 29417 v.reset(OpAMD64MOVSDload) 29418 v.AddArg2(ptr, mem) 29419 return true 29420 } 29421 return false 29422 } 29423 func rewriteValueAMD64_OpLocalAddr(v *Value) bool { 29424 v_0 := v.Args[0] 29425 // match: (LocalAddr {sym} base _) 29426 // result: (LEAQ {sym} base) 29427 for { 29428 sym := auxToSym(v.Aux) 29429 base := v_0 29430 v.reset(OpAMD64LEAQ) 29431 v.Aux = symToAux(sym) 29432 v.AddArg(base) 29433 return true 29434 } 29435 } 29436 func rewriteValueAMD64_OpLsh16x16(v *Value) bool { 29437 v_1 := v.Args[1] 29438 v_0 := v.Args[0] 29439 b := v.Block 29440 // match: (Lsh16x16 <t> x y) 29441 // cond: !shiftIsBounded(v) 29442 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 29443 for { 29444 t := v.Type 29445 x := v_0 29446 y := v_1 29447 if !(!shiftIsBounded(v)) { 29448 break 29449 } 29450 v.reset(OpAMD64ANDL) 29451 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29452 v0.AddArg2(x, y) 29453 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29454 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 29455 v2.AuxInt = int16ToAuxInt(32) 29456 v2.AddArg(y) 29457 v1.AddArg(v2) 29458 v.AddArg2(v0, v1) 29459 return true 29460 } 29461 // match: (Lsh16x16 x y) 29462 // cond: shiftIsBounded(v) 29463 // result: (SHLL x y) 29464 for { 29465 x := v_0 29466 y := v_1 29467 if 
!(shiftIsBounded(v)) { 29468 break 29469 } 29470 v.reset(OpAMD64SHLL) 29471 v.AddArg2(x, y) 29472 return true 29473 } 29474 return false 29475 } 29476 func rewriteValueAMD64_OpLsh16x32(v *Value) bool { 29477 v_1 := v.Args[1] 29478 v_0 := v.Args[0] 29479 b := v.Block 29480 // match: (Lsh16x32 <t> x y) 29481 // cond: !shiftIsBounded(v) 29482 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 29483 for { 29484 t := v.Type 29485 x := v_0 29486 y := v_1 29487 if !(!shiftIsBounded(v)) { 29488 break 29489 } 29490 v.reset(OpAMD64ANDL) 29491 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29492 v0.AddArg2(x, y) 29493 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29494 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 29495 v2.AuxInt = int32ToAuxInt(32) 29496 v2.AddArg(y) 29497 v1.AddArg(v2) 29498 v.AddArg2(v0, v1) 29499 return true 29500 } 29501 // match: (Lsh16x32 x y) 29502 // cond: shiftIsBounded(v) 29503 // result: (SHLL x y) 29504 for { 29505 x := v_0 29506 y := v_1 29507 if !(shiftIsBounded(v)) { 29508 break 29509 } 29510 v.reset(OpAMD64SHLL) 29511 v.AddArg2(x, y) 29512 return true 29513 } 29514 return false 29515 } 29516 func rewriteValueAMD64_OpLsh16x64(v *Value) bool { 29517 v_1 := v.Args[1] 29518 v_0 := v.Args[0] 29519 b := v.Block 29520 // match: (Lsh16x64 <t> x y) 29521 // cond: !shiftIsBounded(v) 29522 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 29523 for { 29524 t := v.Type 29525 x := v_0 29526 y := v_1 29527 if !(!shiftIsBounded(v)) { 29528 break 29529 } 29530 v.reset(OpAMD64ANDL) 29531 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29532 v0.AddArg2(x, y) 29533 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29534 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 29535 v2.AuxInt = int32ToAuxInt(32) 29536 v2.AddArg(y) 29537 v1.AddArg(v2) 29538 v.AddArg2(v0, v1) 29539 return true 29540 } 29541 // match: (Lsh16x64 x y) 29542 // cond: shiftIsBounded(v) 29543 // result: (SHLL x y) 29544 for { 29545 x := v_0 29546 y := v_1 29547 if !(shiftIsBounded(v)) { 29548 break 29549 } 29550 v.reset(OpAMD64SHLL) 29551 v.AddArg2(x, y) 29552 return true 29553 } 29554 return false 29555 } 29556 func rewriteValueAMD64_OpLsh16x8(v *Value) bool { 29557 v_1 := v.Args[1] 29558 v_0 := v.Args[0] 29559 b := v.Block 29560 // match: (Lsh16x8 <t> x y) 29561 // cond: !shiftIsBounded(v) 29562 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 29563 for { 29564 t := v.Type 29565 x := v_0 29566 y := v_1 29567 if !(!shiftIsBounded(v)) { 29568 break 29569 } 29570 v.reset(OpAMD64ANDL) 29571 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29572 v0.AddArg2(x, y) 29573 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29574 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 29575 v2.AuxInt = int8ToAuxInt(32) 29576 v2.AddArg(y) 29577 v1.AddArg(v2) 29578 v.AddArg2(v0, v1) 29579 return true 29580 } 29581 // match: (Lsh16x8 x y) 29582 // cond: shiftIsBounded(v) 29583 // result: (SHLL x y) 29584 for { 29585 x := v_0 29586 y := v_1 29587 if !(shiftIsBounded(v)) { 29588 break 29589 } 29590 v.reset(OpAMD64SHLL) 29591 v.AddArg2(x, y) 29592 return true 29593 } 29594 return false 29595 } 29596 func rewriteValueAMD64_OpLsh32x16(v *Value) bool { 29597 v_1 := v.Args[1] 29598 v_0 := v.Args[0] 29599 b := v.Block 29600 // match: (Lsh32x16 <t> x y) 29601 // cond: !shiftIsBounded(v) 29602 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 29603 for { 29604 t := v.Type 29605 x := v_0 29606 y := v_1 29607 if !(!shiftIsBounded(v)) { 29608 
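// The unbounded Lsh rules here all mask the shift result because x86
// SHL truncates its count (mod 32 for SHLL, mod 64 for SHLQ) while Go
// requires a shift by at least the operand width to produce 0. The 8-
// and 16-bit shifts run on the 32-bit SHLL and compare the count
// against 32, not 16: for counts in [16,32) the low 16 bits of the
// SHLL result are already zero, so only counts >= 32 need the mask.
// A Go model of the 16-bit case (sketch only):
//
//	func lsh16(x uint16, y uint) uint16 {
//		if y >= 32 { // CMPWconst y [32] / SBBLcarrymask zero the result
//			return 0
//		}
//		return uint16(uint32(x) << y) // SHLL on the widened value
//	}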
break 29609 } 29610 v.reset(OpAMD64ANDL) 29611 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29612 v0.AddArg2(x, y) 29613 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29614 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 29615 v2.AuxInt = int16ToAuxInt(32) 29616 v2.AddArg(y) 29617 v1.AddArg(v2) 29618 v.AddArg2(v0, v1) 29619 return true 29620 } 29621 // match: (Lsh32x16 x y) 29622 // cond: shiftIsBounded(v) 29623 // result: (SHLL x y) 29624 for { 29625 x := v_0 29626 y := v_1 29627 if !(shiftIsBounded(v)) { 29628 break 29629 } 29630 v.reset(OpAMD64SHLL) 29631 v.AddArg2(x, y) 29632 return true 29633 } 29634 return false 29635 } 29636 func rewriteValueAMD64_OpLsh32x32(v *Value) bool { 29637 v_1 := v.Args[1] 29638 v_0 := v.Args[0] 29639 b := v.Block 29640 // match: (Lsh32x32 <t> x y) 29641 // cond: !shiftIsBounded(v) 29642 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 29643 for { 29644 t := v.Type 29645 x := v_0 29646 y := v_1 29647 if !(!shiftIsBounded(v)) { 29648 break 29649 } 29650 v.reset(OpAMD64ANDL) 29651 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29652 v0.AddArg2(x, y) 29653 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29654 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 29655 v2.AuxInt = int32ToAuxInt(32) 29656 v2.AddArg(y) 29657 v1.AddArg(v2) 29658 v.AddArg2(v0, v1) 29659 return true 29660 } 29661 // match: (Lsh32x32 x y) 29662 // cond: shiftIsBounded(v) 29663 // result: (SHLL x y) 29664 for { 29665 x := v_0 29666 y := v_1 29667 if !(shiftIsBounded(v)) { 29668 break 29669 } 29670 v.reset(OpAMD64SHLL) 29671 v.AddArg2(x, y) 29672 return true 29673 } 29674 return false 29675 } 29676 func rewriteValueAMD64_OpLsh32x64(v *Value) bool { 29677 v_1 := v.Args[1] 29678 v_0 := v.Args[0] 29679 b := v.Block 29680 // match: (Lsh32x64 <t> x y) 29681 // cond: !shiftIsBounded(v) 29682 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 29683 for { 29684 t := v.Type 29685 x := v_0 29686 y := v_1 29687 if !(!shiftIsBounded(v)) { 29688 break 29689 } 29690 v.reset(OpAMD64ANDL) 29691 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29692 v0.AddArg2(x, y) 29693 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29694 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 29695 v2.AuxInt = int32ToAuxInt(32) 29696 v2.AddArg(y) 29697 v1.AddArg(v2) 29698 v.AddArg2(v0, v1) 29699 return true 29700 } 29701 // match: (Lsh32x64 x y) 29702 // cond: shiftIsBounded(v) 29703 // result: (SHLL x y) 29704 for { 29705 x := v_0 29706 y := v_1 29707 if !(shiftIsBounded(v)) { 29708 break 29709 } 29710 v.reset(OpAMD64SHLL) 29711 v.AddArg2(x, y) 29712 return true 29713 } 29714 return false 29715 } 29716 func rewriteValueAMD64_OpLsh32x8(v *Value) bool { 29717 v_1 := v.Args[1] 29718 v_0 := v.Args[0] 29719 b := v.Block 29720 // match: (Lsh32x8 <t> x y) 29721 // cond: !shiftIsBounded(v) 29722 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 29723 for { 29724 t := v.Type 29725 x := v_0 29726 y := v_1 29727 if !(!shiftIsBounded(v)) { 29728 break 29729 } 29730 v.reset(OpAMD64ANDL) 29731 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29732 v0.AddArg2(x, y) 29733 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29734 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 29735 v2.AuxInt = int8ToAuxInt(32) 29736 v2.AddArg(y) 29737 v1.AddArg(v2) 29738 v.AddArg2(v0, v1) 29739 return true 29740 } 29741 // match: (Lsh32x8 x y) 29742 // cond: shiftIsBounded(v) 29743 // result: (SHLL x y) 29744 for { 29745 x := v_0 29746 y := v_1 29747 if 
!(shiftIsBounded(v)) { 29748 break 29749 } 29750 v.reset(OpAMD64SHLL) 29751 v.AddArg2(x, y) 29752 return true 29753 } 29754 return false 29755 } 29756 func rewriteValueAMD64_OpLsh64x16(v *Value) bool { 29757 v_1 := v.Args[1] 29758 v_0 := v.Args[0] 29759 b := v.Block 29760 // match: (Lsh64x16 <t> x y) 29761 // cond: !shiftIsBounded(v) 29762 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 29763 for { 29764 t := v.Type 29765 x := v_0 29766 y := v_1 29767 if !(!shiftIsBounded(v)) { 29768 break 29769 } 29770 v.reset(OpAMD64ANDQ) 29771 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 29772 v0.AddArg2(x, y) 29773 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 29774 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 29775 v2.AuxInt = int16ToAuxInt(64) 29776 v2.AddArg(y) 29777 v1.AddArg(v2) 29778 v.AddArg2(v0, v1) 29779 return true 29780 } 29781 // match: (Lsh64x16 x y) 29782 // cond: shiftIsBounded(v) 29783 // result: (SHLQ x y) 29784 for { 29785 x := v_0 29786 y := v_1 29787 if !(shiftIsBounded(v)) { 29788 break 29789 } 29790 v.reset(OpAMD64SHLQ) 29791 v.AddArg2(x, y) 29792 return true 29793 } 29794 return false 29795 } 29796 func rewriteValueAMD64_OpLsh64x32(v *Value) bool { 29797 v_1 := v.Args[1] 29798 v_0 := v.Args[0] 29799 b := v.Block 29800 // match: (Lsh64x32 <t> x y) 29801 // cond: !shiftIsBounded(v) 29802 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 29803 for { 29804 t := v.Type 29805 x := v_0 29806 y := v_1 29807 if !(!shiftIsBounded(v)) { 29808 break 29809 } 29810 v.reset(OpAMD64ANDQ) 29811 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 29812 v0.AddArg2(x, y) 29813 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 29814 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 29815 v2.AuxInt = int32ToAuxInt(64) 29816 v2.AddArg(y) 29817 v1.AddArg(v2) 29818 v.AddArg2(v0, v1) 29819 return true 29820 } 29821 // match: (Lsh64x32 x y) 29822 // cond: shiftIsBounded(v) 29823 // result: (SHLQ x y) 29824 for { 29825 x := v_0 29826 y := v_1 29827 if !(shiftIsBounded(v)) { 29828 break 29829 } 29830 v.reset(OpAMD64SHLQ) 29831 v.AddArg2(x, y) 29832 return true 29833 } 29834 return false 29835 } 29836 func rewriteValueAMD64_OpLsh64x64(v *Value) bool { 29837 v_1 := v.Args[1] 29838 v_0 := v.Args[0] 29839 b := v.Block 29840 // match: (Lsh64x64 <t> x y) 29841 // cond: !shiftIsBounded(v) 29842 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 29843 for { 29844 t := v.Type 29845 x := v_0 29846 y := v_1 29847 if !(!shiftIsBounded(v)) { 29848 break 29849 } 29850 v.reset(OpAMD64ANDQ) 29851 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 29852 v0.AddArg2(x, y) 29853 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 29854 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 29855 v2.AuxInt = int32ToAuxInt(64) 29856 v2.AddArg(y) 29857 v1.AddArg(v2) 29858 v.AddArg2(v0, v1) 29859 return true 29860 } 29861 // match: (Lsh64x64 x y) 29862 // cond: shiftIsBounded(v) 29863 // result: (SHLQ x y) 29864 for { 29865 x := v_0 29866 y := v_1 29867 if !(shiftIsBounded(v)) { 29868 break 29869 } 29870 v.reset(OpAMD64SHLQ) 29871 v.AddArg2(x, y) 29872 return true 29873 } 29874 return false 29875 } 29876 func rewriteValueAMD64_OpLsh64x8(v *Value) bool { 29877 v_1 := v.Args[1] 29878 v_0 := v.Args[0] 29879 b := v.Block 29880 // match: (Lsh64x8 <t> x y) 29881 // cond: !shiftIsBounded(v) 29882 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 29883 for { 29884 t := v.Type 29885 x := v_0 29886 y := v_1 29887 if !(!shiftIsBounded(v)) { 29888 
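// The mask itself is built without a branch: CMPQconst y [64] sets the
// carry flag exactly when y < 64 (unsigned), and SBBQcarrymask, a
// subtract-with-borrow of a register from itself, turns that flag into
// 0 or all ones. Go equivalent (the if models the carry flag; the
// generated code stays branch-free):
//
//	func shiftMask64(y uint64) uint64 {
//		var borrow uint64
//		if y < 64 { // the carry out of CMPQconst y [64]
//			borrow = 1
//		}
//		return -borrow // SBBQcarrymask: 0 -> 0, 1 -> ^uint64(0)
//	}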
break 29889 } 29890 v.reset(OpAMD64ANDQ) 29891 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 29892 v0.AddArg2(x, y) 29893 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 29894 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 29895 v2.AuxInt = int8ToAuxInt(64) 29896 v2.AddArg(y) 29897 v1.AddArg(v2) 29898 v.AddArg2(v0, v1) 29899 return true 29900 } 29901 // match: (Lsh64x8 x y) 29902 // cond: shiftIsBounded(v) 29903 // result: (SHLQ x y) 29904 for { 29905 x := v_0 29906 y := v_1 29907 if !(shiftIsBounded(v)) { 29908 break 29909 } 29910 v.reset(OpAMD64SHLQ) 29911 v.AddArg2(x, y) 29912 return true 29913 } 29914 return false 29915 } 29916 func rewriteValueAMD64_OpLsh8x16(v *Value) bool { 29917 v_1 := v.Args[1] 29918 v_0 := v.Args[0] 29919 b := v.Block 29920 // match: (Lsh8x16 <t> x y) 29921 // cond: !shiftIsBounded(v) 29922 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 29923 for { 29924 t := v.Type 29925 x := v_0 29926 y := v_1 29927 if !(!shiftIsBounded(v)) { 29928 break 29929 } 29930 v.reset(OpAMD64ANDL) 29931 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29932 v0.AddArg2(x, y) 29933 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29934 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 29935 v2.AuxInt = int16ToAuxInt(32) 29936 v2.AddArg(y) 29937 v1.AddArg(v2) 29938 v.AddArg2(v0, v1) 29939 return true 29940 } 29941 // match: (Lsh8x16 x y) 29942 // cond: shiftIsBounded(v) 29943 // result: (SHLL x y) 29944 for { 29945 x := v_0 29946 y := v_1 29947 if !(shiftIsBounded(v)) { 29948 break 29949 } 29950 v.reset(OpAMD64SHLL) 29951 v.AddArg2(x, y) 29952 return true 29953 } 29954 return false 29955 } 29956 func rewriteValueAMD64_OpLsh8x32(v *Value) bool { 29957 v_1 := v.Args[1] 29958 v_0 := v.Args[0] 29959 b := v.Block 29960 // match: (Lsh8x32 <t> x y) 29961 // cond: !shiftIsBounded(v) 29962 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 29963 for { 29964 t := v.Type 29965 x := v_0 29966 y := v_1 29967 if !(!shiftIsBounded(v)) { 29968 break 29969 } 29970 v.reset(OpAMD64ANDL) 29971 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 29972 v0.AddArg2(x, y) 29973 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 29974 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 29975 v2.AuxInt = int32ToAuxInt(32) 29976 v2.AddArg(y) 29977 v1.AddArg(v2) 29978 v.AddArg2(v0, v1) 29979 return true 29980 } 29981 // match: (Lsh8x32 x y) 29982 // cond: shiftIsBounded(v) 29983 // result: (SHLL x y) 29984 for { 29985 x := v_0 29986 y := v_1 29987 if !(shiftIsBounded(v)) { 29988 break 29989 } 29990 v.reset(OpAMD64SHLL) 29991 v.AddArg2(x, y) 29992 return true 29993 } 29994 return false 29995 } 29996 func rewriteValueAMD64_OpLsh8x64(v *Value) bool { 29997 v_1 := v.Args[1] 29998 v_0 := v.Args[0] 29999 b := v.Block 30000 // match: (Lsh8x64 <t> x y) 30001 // cond: !shiftIsBounded(v) 30002 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 30003 for { 30004 t := v.Type 30005 x := v_0 30006 y := v_1 30007 if !(!shiftIsBounded(v)) { 30008 break 30009 } 30010 v.reset(OpAMD64ANDL) 30011 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 30012 v0.AddArg2(x, y) 30013 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 30014 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 30015 v2.AuxInt = int32ToAuxInt(32) 30016 v2.AddArg(y) 30017 v1.AddArg(v2) 30018 v.AddArg2(v0, v1) 30019 return true 30020 } 30021 // match: (Lsh8x64 x y) 30022 // cond: shiftIsBounded(v) 30023 // result: (SHLL x y) 30024 for { 30025 x := v_0 30026 y := v_1 30027 if !(shiftIsBounded(v)) { 
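// When the count is provably in range (shiftIsBounded(v), for example
// once the source masks the count as x << (y & 63) or prove has
// established a bound), the carry-mask machinery drops away and the
// bounded rules here emit a bare SHLL/SHLQ. Source-level sketch,
// assuming such shifts are marked bounded by the compiler:
//
//	func lshBounded(x uint64, y uint) uint64 {
//		return x << (y & 63) // bounded count: single SHLQ, no mask
//	}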
30028 break 30029 } 30030 v.reset(OpAMD64SHLL) 30031 v.AddArg2(x, y) 30032 return true 30033 } 30034 return false 30035 } 30036 func rewriteValueAMD64_OpLsh8x8(v *Value) bool { 30037 v_1 := v.Args[1] 30038 v_0 := v.Args[0] 30039 b := v.Block 30040 // match: (Lsh8x8 <t> x y) 30041 // cond: !shiftIsBounded(v) 30042 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 30043 for { 30044 t := v.Type 30045 x := v_0 30046 y := v_1 30047 if !(!shiftIsBounded(v)) { 30048 break 30049 } 30050 v.reset(OpAMD64ANDL) 30051 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 30052 v0.AddArg2(x, y) 30053 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 30054 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 30055 v2.AuxInt = int8ToAuxInt(32) 30056 v2.AddArg(y) 30057 v1.AddArg(v2) 30058 v.AddArg2(v0, v1) 30059 return true 30060 } 30061 // match: (Lsh8x8 x y) 30062 // cond: shiftIsBounded(v) 30063 // result: (SHLL x y) 30064 for { 30065 x := v_0 30066 y := v_1 30067 if !(shiftIsBounded(v)) { 30068 break 30069 } 30070 v.reset(OpAMD64SHLL) 30071 v.AddArg2(x, y) 30072 return true 30073 } 30074 return false 30075 } 30076 func rewriteValueAMD64_OpMod16(v *Value) bool { 30077 v_1 := v.Args[1] 30078 v_0 := v.Args[0] 30079 b := v.Block 30080 typ := &b.Func.Config.Types 30081 // match: (Mod16 [a] x y) 30082 // result: (Select1 (DIVW [a] x y)) 30083 for { 30084 a := auxIntToBool(v.AuxInt) 30085 x := v_0 30086 y := v_1 30087 v.reset(OpSelect1) 30088 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 30089 v0.AuxInt = boolToAuxInt(a) 30090 v0.AddArg2(x, y) 30091 v.AddArg(v0) 30092 return true 30093 } 30094 } 30095 func rewriteValueAMD64_OpMod16u(v *Value) bool { 30096 v_1 := v.Args[1] 30097 v_0 := v.Args[0] 30098 b := v.Block 30099 typ := &b.Func.Config.Types 30100 // match: (Mod16u x y) 30101 // result: (Select1 (DIVWU x y)) 30102 for { 30103 x := v_0 30104 y := v_1 30105 v.reset(OpSelect1) 30106 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 30107 v0.AddArg2(x, y) 30108 v.AddArg(v0) 30109 return true 30110 } 30111 } 30112 func rewriteValueAMD64_OpMod32(v *Value) bool { 30113 v_1 := v.Args[1] 30114 v_0 := v.Args[0] 30115 b := v.Block 30116 typ := &b.Func.Config.Types 30117 // match: (Mod32 [a] x y) 30118 // result: (Select1 (DIVL [a] x y)) 30119 for { 30120 a := auxIntToBool(v.AuxInt) 30121 x := v_0 30122 y := v_1 30123 v.reset(OpSelect1) 30124 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 30125 v0.AuxInt = boolToAuxInt(a) 30126 v0.AddArg2(x, y) 30127 v.AddArg(v0) 30128 return true 30129 } 30130 } 30131 func rewriteValueAMD64_OpMod32u(v *Value) bool { 30132 v_1 := v.Args[1] 30133 v_0 := v.Args[0] 30134 b := v.Block 30135 typ := &b.Func.Config.Types 30136 // match: (Mod32u x y) 30137 // result: (Select1 (DIVLU x y)) 30138 for { 30139 x := v_0 30140 y := v_1 30141 v.reset(OpSelect1) 30142 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 30143 v0.AddArg2(x, y) 30144 v.AddArg(v0) 30145 return true 30146 } 30147 } 30148 func rewriteValueAMD64_OpMod64(v *Value) bool { 30149 v_1 := v.Args[1] 30150 v_0 := v.Args[0] 30151 b := v.Block 30152 typ := &b.Func.Config.Types 30153 // match: (Mod64 [a] x y) 30154 // result: (Select1 (DIVQ [a] x y)) 30155 for { 30156 a := auxIntToBool(v.AuxInt) 30157 x := v_0 30158 y := v_1 30159 v.reset(OpSelect1) 30160 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 30161 v0.AuxInt = boolToAuxInt(a) 30162 v0.AddArg2(x, y) 30163 v.AddArg(v0) 
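// The Mod rules in this stretch mirror the Div rules exactly, except
// that Select1 extracts the remainder half of the same DIVW/DIVL/DIVQ
// tuple. Because both halves come from one op, a quotient and remainder
// of the same operands can CSE into a single hardware divide:
//
//	func divmod(x, y int64) (int64, int64) {
//		return x / y, x % y // one DIVQ; Select0 and Select1 of its tuple
//	}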
30164 return true 30165 } 30166 } 30167 func rewriteValueAMD64_OpMod64u(v *Value) bool { 30168 v_1 := v.Args[1] 30169 v_0 := v.Args[0] 30170 b := v.Block 30171 typ := &b.Func.Config.Types 30172 // match: (Mod64u x y) 30173 // result: (Select1 (DIVQU x y)) 30174 for { 30175 x := v_0 30176 y := v_1 30177 v.reset(OpSelect1) 30178 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 30179 v0.AddArg2(x, y) 30180 v.AddArg(v0) 30181 return true 30182 } 30183 } 30184 func rewriteValueAMD64_OpMod8(v *Value) bool { 30185 v_1 := v.Args[1] 30186 v_0 := v.Args[0] 30187 b := v.Block 30188 typ := &b.Func.Config.Types 30189 // match: (Mod8 x y) 30190 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 30191 for { 30192 x := v_0 30193 y := v_1 30194 v.reset(OpSelect1) 30195 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 30196 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 30197 v1.AddArg(x) 30198 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 30199 v2.AddArg(y) 30200 v0.AddArg2(v1, v2) 30201 v.AddArg(v0) 30202 return true 30203 } 30204 } 30205 func rewriteValueAMD64_OpMod8u(v *Value) bool { 30206 v_1 := v.Args[1] 30207 v_0 := v.Args[0] 30208 b := v.Block 30209 typ := &b.Func.Config.Types 30210 // match: (Mod8u x y) 30211 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 30212 for { 30213 x := v_0 30214 y := v_1 30215 v.reset(OpSelect1) 30216 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 30217 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 30218 v1.AddArg(x) 30219 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 30220 v2.AddArg(y) 30221 v0.AddArg2(v1, v2) 30222 v.AddArg(v0) 30223 return true 30224 } 30225 } 30226 func rewriteValueAMD64_OpMove(v *Value) bool { 30227 v_2 := v.Args[2] 30228 v_1 := v.Args[1] 30229 v_0 := v.Args[0] 30230 b := v.Block 30231 config := b.Func.Config 30232 typ := &b.Func.Config.Types 30233 // match: (Move [0] _ _ mem) 30234 // result: mem 30235 for { 30236 if auxIntToInt64(v.AuxInt) != 0 { 30237 break 30238 } 30239 mem := v_2 30240 v.copyOf(mem) 30241 return true 30242 } 30243 // match: (Move [1] dst src mem) 30244 // result: (MOVBstore dst (MOVBload src mem) mem) 30245 for { 30246 if auxIntToInt64(v.AuxInt) != 1 { 30247 break 30248 } 30249 dst := v_0 30250 src := v_1 30251 mem := v_2 30252 v.reset(OpAMD64MOVBstore) 30253 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 30254 v0.AddArg2(src, mem) 30255 v.AddArg3(dst, v0, mem) 30256 return true 30257 } 30258 // match: (Move [2] dst src mem) 30259 // result: (MOVWstore dst (MOVWload src mem) mem) 30260 for { 30261 if auxIntToInt64(v.AuxInt) != 2 { 30262 break 30263 } 30264 dst := v_0 30265 src := v_1 30266 mem := v_2 30267 v.reset(OpAMD64MOVWstore) 30268 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 30269 v0.AddArg2(src, mem) 30270 v.AddArg3(dst, v0, mem) 30271 return true 30272 } 30273 // match: (Move [4] dst src mem) 30274 // result: (MOVLstore dst (MOVLload src mem) mem) 30275 for { 30276 if auxIntToInt64(v.AuxInt) != 4 { 30277 break 30278 } 30279 dst := v_0 30280 src := v_1 30281 mem := v_2 30282 v.reset(OpAMD64MOVLstore) 30283 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 30284 v0.AddArg2(src, mem) 30285 v.AddArg3(dst, v0, mem) 30286 return true 30287 } 30288 // match: (Move [8] dst src mem) 30289 // result: (MOVQstore dst (MOVQload src mem) mem) 30290 for { 30291 if auxIntToInt64(v.AuxInt) != 8 { 30292 break 30293 } 30294 dst := v_0 30295 src := v_1 30296 mem := v_2 30297 
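// Move is a memory-to-memory copy in memory SSA: it consumes a memory
// argument and produces the new memory state, which is why
// (Move [0] _ _ mem) above simply forwards mem via v.copyOf(mem), and
// why each expansion threads its store through the previous one, e.g.
//
//	// (Move [2] dst src mem) becomes, in effect:
//	//   mem' = MOVWstore dst (MOVWload src mem) mem
//
// with the load reading the incoming memory and the store producing the
// result.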
v.reset(OpAMD64MOVQstore) 30298 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30299 v0.AddArg2(src, mem) 30300 v.AddArg3(dst, v0, mem) 30301 return true 30302 } 30303 // match: (Move [16] dst src mem) 30304 // cond: config.useSSE 30305 // result: (MOVOstore dst (MOVOload src mem) mem) 30306 for { 30307 if auxIntToInt64(v.AuxInt) != 16 { 30308 break 30309 } 30310 dst := v_0 30311 src := v_1 30312 mem := v_2 30313 if !(config.useSSE) { 30314 break 30315 } 30316 v.reset(OpAMD64MOVOstore) 30317 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 30318 v0.AddArg2(src, mem) 30319 v.AddArg3(dst, v0, mem) 30320 return true 30321 } 30322 // match: (Move [16] dst src mem) 30323 // cond: !config.useSSE 30324 // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 30325 for { 30326 if auxIntToInt64(v.AuxInt) != 16 { 30327 break 30328 } 30329 dst := v_0 30330 src := v_1 30331 mem := v_2 30332 if !(!config.useSSE) { 30333 break 30334 } 30335 v.reset(OpAMD64MOVQstore) 30336 v.AuxInt = int32ToAuxInt(8) 30337 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30338 v0.AuxInt = int32ToAuxInt(8) 30339 v0.AddArg2(src, mem) 30340 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 30341 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30342 v2.AddArg2(src, mem) 30343 v1.AddArg3(dst, v2, mem) 30344 v.AddArg3(dst, v0, v1) 30345 return true 30346 } 30347 // match: (Move [32] dst src mem) 30348 // result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem)) 30349 for { 30350 if auxIntToInt64(v.AuxInt) != 32 { 30351 break 30352 } 30353 dst := v_0 30354 src := v_1 30355 mem := v_2 30356 v.reset(OpMove) 30357 v.AuxInt = int64ToAuxInt(16) 30358 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 30359 v0.AuxInt = int64ToAuxInt(16) 30360 v0.AddArg(dst) 30361 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 30362 v1.AuxInt = int64ToAuxInt(16) 30363 v1.AddArg(src) 30364 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) 30365 v2.AuxInt = int64ToAuxInt(16) 30366 v2.AddArg3(dst, src, mem) 30367 v.AddArg3(v0, v1, v2) 30368 return true 30369 } 30370 // match: (Move [48] dst src mem) 30371 // cond: config.useSSE 30372 // result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem)) 30373 for { 30374 if auxIntToInt64(v.AuxInt) != 48 { 30375 break 30376 } 30377 dst := v_0 30378 src := v_1 30379 mem := v_2 30380 if !(config.useSSE) { 30381 break 30382 } 30383 v.reset(OpMove) 30384 v.AuxInt = int64ToAuxInt(32) 30385 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 30386 v0.AuxInt = int64ToAuxInt(16) 30387 v0.AddArg(dst) 30388 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 30389 v1.AuxInt = int64ToAuxInt(16) 30390 v1.AddArg(src) 30391 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) 30392 v2.AuxInt = int64ToAuxInt(16) 30393 v2.AddArg3(dst, src, mem) 30394 v.AddArg3(v0, v1, v2) 30395 return true 30396 } 30397 // match: (Move [64] dst src mem) 30398 // cond: config.useSSE 30399 // result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem)) 30400 for { 30401 if auxIntToInt64(v.AuxInt) != 64 { 30402 break 30403 } 30404 dst := v_0 30405 src := v_1 30406 mem := v_2 30407 if !(config.useSSE) { 30408 break 30409 } 30410 v.reset(OpMove) 30411 v.AuxInt = int64ToAuxInt(32) 30412 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 30413 v0.AuxInt = int64ToAuxInt(32) 30414 v0.AddArg(dst) 30415 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 30416 v1.AuxInt = int64ToAuxInt(32) 30417 
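// Mid-sized copies are peeled recursively: the 32-, 48- and 64-byte
// rules split off 16- or 32-byte chunks through OffPtr and re-emit a
// smaller Move, each step threading the memory value produced by the
// previous chunk. For instance, with config.useSSE:
//
//	// Move [48] dst src mem
//	//   -> Move [32] (dst+16) (src+16) (Move [16] dst src mem)
//	//   -> three 16-byte MOVOload/MOVOstore pairs in total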
v1.AddArg(src) 30418 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) 30419 v2.AuxInt = int64ToAuxInt(32) 30420 v2.AddArg3(dst, src, mem) 30421 v.AddArg3(v0, v1, v2) 30422 return true 30423 } 30424 // match: (Move [3] dst src mem) 30425 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 30426 for { 30427 if auxIntToInt64(v.AuxInt) != 3 { 30428 break 30429 } 30430 dst := v_0 30431 src := v_1 30432 mem := v_2 30433 v.reset(OpAMD64MOVBstore) 30434 v.AuxInt = int32ToAuxInt(2) 30435 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 30436 v0.AuxInt = int32ToAuxInt(2) 30437 v0.AddArg2(src, mem) 30438 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) 30439 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 30440 v2.AddArg2(src, mem) 30441 v1.AddArg3(dst, v2, mem) 30442 v.AddArg3(dst, v0, v1) 30443 return true 30444 } 30445 // match: (Move [5] dst src mem) 30446 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 30447 for { 30448 if auxIntToInt64(v.AuxInt) != 5 { 30449 break 30450 } 30451 dst := v_0 30452 src := v_1 30453 mem := v_2 30454 v.reset(OpAMD64MOVBstore) 30455 v.AuxInt = int32ToAuxInt(4) 30456 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 30457 v0.AuxInt = int32ToAuxInt(4) 30458 v0.AddArg2(src, mem) 30459 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 30460 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 30461 v2.AddArg2(src, mem) 30462 v1.AddArg3(dst, v2, mem) 30463 v.AddArg3(dst, v0, v1) 30464 return true 30465 } 30466 // match: (Move [6] dst src mem) 30467 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 30468 for { 30469 if auxIntToInt64(v.AuxInt) != 6 { 30470 break 30471 } 30472 dst := v_0 30473 src := v_1 30474 mem := v_2 30475 v.reset(OpAMD64MOVWstore) 30476 v.AuxInt = int32ToAuxInt(4) 30477 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 30478 v0.AuxInt = int32ToAuxInt(4) 30479 v0.AddArg2(src, mem) 30480 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 30481 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 30482 v2.AddArg2(src, mem) 30483 v1.AddArg3(dst, v2, mem) 30484 v.AddArg3(dst, v0, v1) 30485 return true 30486 } 30487 // match: (Move [7] dst src mem) 30488 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 30489 for { 30490 if auxIntToInt64(v.AuxInt) != 7 { 30491 break 30492 } 30493 dst := v_0 30494 src := v_1 30495 mem := v_2 30496 v.reset(OpAMD64MOVLstore) 30497 v.AuxInt = int32ToAuxInt(3) 30498 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 30499 v0.AuxInt = int32ToAuxInt(3) 30500 v0.AddArg2(src, mem) 30501 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 30502 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 30503 v2.AddArg2(src, mem) 30504 v1.AddArg3(dst, v2, mem) 30505 v.AddArg3(dst, v0, v1) 30506 return true 30507 } 30508 // match: (Move [9] dst src mem) 30509 // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 30510 for { 30511 if auxIntToInt64(v.AuxInt) != 9 { 30512 break 30513 } 30514 dst := v_0 30515 src := v_1 30516 mem := v_2 30517 v.reset(OpAMD64MOVBstore) 30518 v.AuxInt = int32ToAuxInt(8) 30519 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 30520 v0.AuxInt = int32ToAuxInt(8) 30521 v0.AddArg2(src, mem) 30522 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 30523 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30524 v2.AddArg2(src, mem) 30525 v1.AddArg3(dst, 
v2, mem) 30526 v.AddArg3(dst, v0, v1) 30527 return true 30528 } 30529 // match: (Move [10] dst src mem) 30530 // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 30531 for { 30532 if auxIntToInt64(v.AuxInt) != 10 { 30533 break 30534 } 30535 dst := v_0 30536 src := v_1 30537 mem := v_2 30538 v.reset(OpAMD64MOVWstore) 30539 v.AuxInt = int32ToAuxInt(8) 30540 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 30541 v0.AuxInt = int32ToAuxInt(8) 30542 v0.AddArg2(src, mem) 30543 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 30544 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30545 v2.AddArg2(src, mem) 30546 v1.AddArg3(dst, v2, mem) 30547 v.AddArg3(dst, v0, v1) 30548 return true 30549 } 30550 // match: (Move [12] dst src mem) 30551 // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 30552 for { 30553 if auxIntToInt64(v.AuxInt) != 12 { 30554 break 30555 } 30556 dst := v_0 30557 src := v_1 30558 mem := v_2 30559 v.reset(OpAMD64MOVLstore) 30560 v.AuxInt = int32ToAuxInt(8) 30561 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 30562 v0.AuxInt = int32ToAuxInt(8) 30563 v0.AddArg2(src, mem) 30564 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 30565 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30566 v2.AddArg2(src, mem) 30567 v1.AddArg3(dst, v2, mem) 30568 v.AddArg3(dst, v0, v1) 30569 return true 30570 } 30571 // match: (Move [s] dst src mem) 30572 // cond: s == 11 || s >= 13 && s <= 15 30573 // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) 30574 for { 30575 s := auxIntToInt64(v.AuxInt) 30576 dst := v_0 30577 src := v_1 30578 mem := v_2 30579 if !(s == 11 || s >= 13 && s <= 15) { 30580 break 30581 } 30582 v.reset(OpAMD64MOVQstore) 30583 v.AuxInt = int32ToAuxInt(int32(s - 8)) 30584 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30585 v0.AuxInt = int32ToAuxInt(int32(s - 8)) 30586 v0.AddArg2(src, mem) 30587 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 30588 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30589 v2.AddArg2(src, mem) 30590 v1.AddArg3(dst, v2, mem) 30591 v.AddArg3(dst, v0, v1) 30592 return true 30593 } 30594 // match: (Move [s] dst src mem) 30595 // cond: s > 16 && s%16 != 0 && s%16 <= 8 30596 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) 30597 for { 30598 s := auxIntToInt64(v.AuxInt) 30599 dst := v_0 30600 src := v_1 30601 mem := v_2 30602 if !(s > 16 && s%16 != 0 && s%16 <= 8) { 30603 break 30604 } 30605 v.reset(OpMove) 30606 v.AuxInt = int64ToAuxInt(s - s%16) 30607 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 30608 v0.AuxInt = int64ToAuxInt(s % 16) 30609 v0.AddArg(dst) 30610 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 30611 v1.AuxInt = int64ToAuxInt(s % 16) 30612 v1.AddArg(src) 30613 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 30614 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30615 v3.AddArg2(src, mem) 30616 v2.AddArg3(dst, v3, mem) 30617 v.AddArg3(v0, v1, v2) 30618 return true 30619 } 30620 // match: (Move [s] dst src mem) 30621 // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE 30622 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) 30623 for { 30624 s := auxIntToInt64(v.AuxInt) 30625 dst := v_0 30626 src := v_1 30627 mem := v_2 30628 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) 
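// Two points in this stretch of Move rules. First, odd sizes use
// overlapping widened copies instead of byte loops: Move [7] copies
// bytes 0-3 and then 3-6, rewriting byte 3, which costs two load/store
// pairs instead of three. In Go terms (hypothetical helper over byte
// slices):
//
//	func move7(dst, src []byte) {
//		copy(dst[0:4], src[0:4]) // MOVLstore dst (MOVLload src mem) mem
//		copy(dst[3:7], src[3:7]) // MOVLstore [3] ... overlaps at byte 3
//	}
//
// Second, the large-copy strategy is tiered: 16-byte-aligned sizes in
// (64, 16*64] become DUFFCOPY, an unrolled copy loop in the runtime,
// and anything larger (or any multiple of 8 when the Duff device is
// disabled) falls back to REPMOVSQ with an explicit count of s/8.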
{ 30629 break 30630 } 30631 v.reset(OpMove) 30632 v.AuxInt = int64ToAuxInt(s - s%16) 30633 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 30634 v0.AuxInt = int64ToAuxInt(s % 16) 30635 v0.AddArg(dst) 30636 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 30637 v1.AuxInt = int64ToAuxInt(s % 16) 30638 v1.AddArg(src) 30639 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 30640 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 30641 v3.AddArg2(src, mem) 30642 v2.AddArg3(dst, v3, mem) 30643 v.AddArg3(v0, v1, v2) 30644 return true 30645 } 30646 // match: (Move [s] dst src mem) 30647 // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE 30648 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))) 30649 for { 30650 s := auxIntToInt64(v.AuxInt) 30651 dst := v_0 30652 src := v_1 30653 mem := v_2 30654 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) { 30655 break 30656 } 30657 v.reset(OpMove) 30658 v.AuxInt = int64ToAuxInt(s - s%16) 30659 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 30660 v0.AuxInt = int64ToAuxInt(s % 16) 30661 v0.AddArg(dst) 30662 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 30663 v1.AuxInt = int64ToAuxInt(s % 16) 30664 v1.AddArg(src) 30665 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 30666 v2.AuxInt = int32ToAuxInt(8) 30667 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30668 v3.AuxInt = int32ToAuxInt(8) 30669 v3.AddArg2(src, mem) 30670 v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 30671 v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 30672 v5.AddArg2(src, mem) 30673 v4.AddArg3(dst, v5, mem) 30674 v2.AddArg3(dst, v3, v4) 30675 v.AddArg3(v0, v1, v2) 30676 return true 30677 } 30678 // match: (Move [s] dst src mem) 30679 // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s) 30680 // result: (DUFFCOPY [s] dst src mem) 30681 for { 30682 s := auxIntToInt64(v.AuxInt) 30683 dst := v_0 30684 src := v_1 30685 mem := v_2 30686 if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { 30687 break 30688 } 30689 v.reset(OpAMD64DUFFCOPY) 30690 v.AuxInt = int64ToAuxInt(s) 30691 v.AddArg3(dst, src, mem) 30692 return true 30693 } 30694 // match: (Move [s] dst src mem) 30695 // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) 30696 // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) 30697 for { 30698 s := auxIntToInt64(v.AuxInt) 30699 dst := v_0 30700 src := v_1 30701 mem := v_2 30702 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) { 30703 break 30704 } 30705 v.reset(OpAMD64REPMOVSQ) 30706 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 30707 v0.AuxInt = int64ToAuxInt(s / 8) 30708 v.AddArg4(dst, src, v0, mem) 30709 return true 30710 } 30711 return false 30712 } 30713 func rewriteValueAMD64_OpNeg32F(v *Value) bool { 30714 v_0 := v.Args[0] 30715 b := v.Block 30716 typ := &b.Func.Config.Types 30717 // match: (Neg32F x) 30718 // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))])) 30719 for { 30720 x := v_0 30721 v.reset(OpAMD64PXOR) 30722 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) 30723 v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) 30724 v.AddArg2(x, v0) 30725 return true 30726 } 30727 } 30728 func rewriteValueAMD64_OpNeg64F(v *Value) bool { 30729 v_0 := v.Args[0] 30730 b := v.Block 30731 typ := &b.Func.Config.Types 30732 // match: (Neg64F x) 30733 // 
result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)])) 30734 for { 30735 x := v_0 30736 v.reset(OpAMD64PXOR) 30737 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) 30738 v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) 30739 v.AddArg2(x, v0) 30740 return true 30741 } 30742 } 30743 func rewriteValueAMD64_OpNeq16(v *Value) bool { 30744 v_1 := v.Args[1] 30745 v_0 := v.Args[0] 30746 b := v.Block 30747 // match: (Neq16 x y) 30748 // result: (SETNE (CMPW x y)) 30749 for { 30750 x := v_0 30751 y := v_1 30752 v.reset(OpAMD64SETNE) 30753 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 30754 v0.AddArg2(x, y) 30755 v.AddArg(v0) 30756 return true 30757 } 30758 } 30759 func rewriteValueAMD64_OpNeq32(v *Value) bool { 30760 v_1 := v.Args[1] 30761 v_0 := v.Args[0] 30762 b := v.Block 30763 // match: (Neq32 x y) 30764 // result: (SETNE (CMPL x y)) 30765 for { 30766 x := v_0 30767 y := v_1 30768 v.reset(OpAMD64SETNE) 30769 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 30770 v0.AddArg2(x, y) 30771 v.AddArg(v0) 30772 return true 30773 } 30774 } 30775 func rewriteValueAMD64_OpNeq32F(v *Value) bool { 30776 v_1 := v.Args[1] 30777 v_0 := v.Args[0] 30778 b := v.Block 30779 // match: (Neq32F x y) 30780 // result: (SETNEF (UCOMISS x y)) 30781 for { 30782 x := v_0 30783 y := v_1 30784 v.reset(OpAMD64SETNEF) 30785 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 30786 v0.AddArg2(x, y) 30787 v.AddArg(v0) 30788 return true 30789 } 30790 } 30791 func rewriteValueAMD64_OpNeq64(v *Value) bool { 30792 v_1 := v.Args[1] 30793 v_0 := v.Args[0] 30794 b := v.Block 30795 // match: (Neq64 x y) 30796 // result: (SETNE (CMPQ x y)) 30797 for { 30798 x := v_0 30799 y := v_1 30800 v.reset(OpAMD64SETNE) 30801 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 30802 v0.AddArg2(x, y) 30803 v.AddArg(v0) 30804 return true 30805 } 30806 } 30807 func rewriteValueAMD64_OpNeq64F(v *Value) bool { 30808 v_1 := v.Args[1] 30809 v_0 := v.Args[0] 30810 b := v.Block 30811 // match: (Neq64F x y) 30812 // result: (SETNEF (UCOMISD x y)) 30813 for { 30814 x := v_0 30815 y := v_1 30816 v.reset(OpAMD64SETNEF) 30817 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 30818 v0.AddArg2(x, y) 30819 v.AddArg(v0) 30820 return true 30821 } 30822 } 30823 func rewriteValueAMD64_OpNeq8(v *Value) bool { 30824 v_1 := v.Args[1] 30825 v_0 := v.Args[0] 30826 b := v.Block 30827 // match: (Neq8 x y) 30828 // result: (SETNE (CMPB x y)) 30829 for { 30830 x := v_0 30831 y := v_1 30832 v.reset(OpAMD64SETNE) 30833 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 30834 v0.AddArg2(x, y) 30835 v.AddArg(v0) 30836 return true 30837 } 30838 } 30839 func rewriteValueAMD64_OpNeqB(v *Value) bool { 30840 v_1 := v.Args[1] 30841 v_0 := v.Args[0] 30842 b := v.Block 30843 // match: (NeqB x y) 30844 // result: (SETNE (CMPB x y)) 30845 for { 30846 x := v_0 30847 y := v_1 30848 v.reset(OpAMD64SETNE) 30849 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 30850 v0.AddArg2(x, y) 30851 v.AddArg(v0) 30852 return true 30853 } 30854 } 30855 func rewriteValueAMD64_OpNeqPtr(v *Value) bool { 30856 v_1 := v.Args[1] 30857 v_0 := v.Args[0] 30858 b := v.Block 30859 // match: (NeqPtr x y) 30860 // result: (SETNE (CMPQ x y)) 30861 for { 30862 x := v_0 30863 y := v_1 30864 v.reset(OpAMD64SETNE) 30865 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 30866 v0.AddArg2(x, y) 30867 v.AddArg(v0) 30868 return true 30869 } 30870 } 30871 func rewriteValueAMD64_OpNot(v *Value) bool { 30872 v_0 := v.Args[0] 30873 // match: (Not x) 30874 // result: 
func rewriteValueAMD64_OpNot(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Not x)
	// result: (XORLconst [1] x)
	for {
		x := v_0
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpOffPtr(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (OffPtr [off] ptr)
	// cond: is32Bit(off)
	// result: (ADDQconst [int32(off)] ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		if !(is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(int32(off))
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(off)
		v.AddArg2(v0, ptr)
		return true
	}
}
func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicBoundsA [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsA)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicBoundsB [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsB)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicBoundsC [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsC)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpPopCount16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount16 x)
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount8 x)
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
	v_0 := v.Args[0]
	// match: (RoundToEven x)
	// result: (ROUNDSD [0] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
}
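// The unsigned Rsh*Ux* rules below share one pattern. Go defines x >> y as
// 0 once y reaches the operand width, while x86 shift instructions mask the
// count, so when shiftIsBounded(v) cannot prove y is in range, the raw
// shift is ANDed with SBBcarrymask(CMP y, width): all ones when y < width
// and zero otherwise. A minimal scalar sketch of the same logic (shr16 is
// an illustrative name, not a helper in this file):
//
//	func shr16(x uint16, y uint64) uint16 {
//		var mask uint16
//		if y < 16 {
//			mask = 0xFFFF // SBBLcarrymask: borrow set, all ones
//		}
//		return (x >> (y & 31)) & mask // masked hardware shift, then AND
//	}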
func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
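// The signed Rsh*x* rules below clamp the count instead of masking the
// result: Go requires arithmetic right shifts to keep sign-filling once y
// reaches the operand width, so the count becomes
// y | ^SBBcarrymask(CMP y, width), which leaves y unchanged when it is in
// range and otherwise turns it into all ones. Even after the CPU's own
// count masking, an all-ones count still shifts by at least width-1, which
// for an arithmetic shift produces the required 0 or -1 sign fill.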
func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
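// Note on the AuxInt helpers above and below: the helper tracks the compare
// op's auxint type, not the magnitude of the constant. CMPBconst carries an
// int8, CMPWconst an int16, and CMPLconst/CMPQconst an int32, which is why
// the same bound of 64 is written as int8ToAuxInt(64), int16ToAuxInt(64),
// or int32ToAuxInt(64) depending on the width of the count operand.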
func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
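// The Select0/Select1 rules below lower the generic two-result ops
// (Mul64uover, Add64carry, Sub64borrow, AddTupleFirst*) onto AMD64
// tuple-producing instructions. The incoming carry c is converted to the
// CPU carry flag with NEGLflags, since negating a nonzero value sets the
// carry, which lets ADCQ and SBBQ consume it directly.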
func rewriteValueAMD64_OpSelect0(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select0 (Mul64uover x y))
	// result: (Select0 <typ.UInt64> (MULQU x y))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Mul32uover x y))
	// result: (Select0 <typ.UInt32> (MULLU x y))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt32
		v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Add64carry x y c))
	// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Sub64borrow x y c))
	// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	return false
}
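// Select1 of Add64carry/Sub64borrow below materializes the carry-out as a
// 0-or-1 value: SBBQcarrymask turns the carry flag into 0 or all ones, and
// NEGQ maps those to 0 or 1. A hedged scalar sketch of that last step
// (carryOut is an illustrative name, not a helper in this file):
//
//	func carryOut(carrySet bool) uint64 {
//		var mask uint64
//		if carrySet {
//			mask = ^uint64(0) // SBBQcarrymask: all ones
//		}
//		return -mask // NEGQ: 0 -> 0, all ones -> 1
//	}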
func rewriteValueAMD64_OpSelect1(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select1 (Mul64uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Mul32uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Add64carry x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Sub64borrow x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (NEGLflags (MOVQconst [0])))
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		x := v_0_0_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
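// The SelectN rules below recognize a fully materialized call to
// runtime.memmove, in either ABI shape: arguments spilled through a chain
// of three stores, or passed directly with a constant MOVQconst size. When
// isInlinableMemmove approves the size, the whole call is replaced by a
// plain Move, which the Move lowerings earlier in this file then expand
// inline.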
func rewriteValueAMD64_OpSelectN(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
	// cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
	// result: (Move [sc.Val64()] dst src mem)
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		call := v_0
		if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
			break
		}
		sym := auxToCall(call.Aux)
		s1 := call.Args[0]
		if s1.Op != OpAMD64MOVQstoreconst {
			break
		}
		sc := auxIntToValAndOff(s1.AuxInt)
		_ = s1.Args[1]
		s2 := s1.Args[1]
		if s2.Op != OpAMD64MOVQstore {
			break
		}
		_ = s2.Args[2]
		src := s2.Args[1]
		s3 := s2.Args[2]
		if s3.Op != OpAMD64MOVQstore {
			break
		}
		mem := s3.Args[2]
		dst := s3.Args[1]
		if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(sc.Val64())
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
	// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
	// result: (Move [sz] dst src mem)
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		call := v_0
		if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
			break
		}
		sym := auxToCall(call.Aux)
		mem := call.Args[3]
		dst := call.Args[0]
		src := call.Args[1]
		call_2 := call.Args[2]
		if call_2.Op != OpAMD64MOVQconst {
			break
		}
		sz := auxIntToInt64(call_2.AuxInt)
		if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(sz)
		v.AddArg3(dst, src, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSlicemask(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (Slicemask <t> x)
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v_0
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(63)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SpectreIndex <t> x y)
	// result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64CMOVQCC)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v1.AddArg2(x, y)
		v.AddArg3(x, v0, v1)
		return true
	}
}
func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SpectreSliceIndex <t> x y)
	// result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64CMOVQHI)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v1.AddArg2(x, y)
		v.AddArg3(x, v0, v1)
		return true
	}
}
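// Slicemask above computes 0 for x == 0 and all ones otherwise: NEGQ sets
// the sign bit for any nonzero x, and SARQconst [63] smears it across the
// word. The Spectre* rules are the branchless index clamps used under
// -spectre=index: a CMOV on the unsigned comparison forces an out-of-range
// index to 0 instead of letting a mispredicted branch speculate past the
// bounds check. The Store rules below dispatch purely on the size and
// floatness of the stored type.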
func rewriteValueAMD64_OpStore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpTrunc(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Trunc x)
	// result: (ROUNDSD [3] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(3)
		v.AddArg(x)
		return true
	}
}
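// RoundToEven, Trunc, and the other ROUNDSD-based rules differ only in the
// immediate: the auxint selects the SSE4.1 rounding mode (0 = round to
// nearest even, 3 = truncate toward zero). The Zero rules below mirror the
// Move lowerings earlier in the file: tiny sizes become one or two
// constant stores, odd sizes use overlapping stores, and larger sizes are
// peeled down to an aligned multiple of 8 or 16 bytes and handled
// recursively.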
func rewriteValueAMD64_OpZero(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Zero [0] _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_1
		v.copyOf(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
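	// The odd sizes 3, 5, 6, and 7 above are zeroed with two overlapping
	// stores rather than a store per power-of-two chunk: Zero [7], for
	// example, writes 4 bytes at offset 3 and 4 bytes at offset 0, covering
	// bytes 0-6 and harmlessly writing byte 3 twice. This trades one
	// redundant byte for a shorter instruction sequence.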
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%8)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 24 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v2.AddArg2(destptr, mem)
		v1.AddArg2(destptr, v2)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
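	// The cases above handle the non-SSE path with chains of 8-byte
	// MOVQstoreconst stores. The remaining rules take the SSE path: sizes
	// 9-15 use two overlapping 8-byte stores, and larger sizes are peeled
	// down to a multiple of 16 and finished with 16-byte MOVOstoreconst
	// stores (up to 64 bytes without a loop).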
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
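	// Editor's note (illustrative, not generated): MOVOstoreconst zeroes 16
	// bytes at a time through an XMM register, so (Zero [48]) above unrolls
	// to three 16-byte stores at offsets 32, 16, and 0.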
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v2.AddArg2(destptr, mem)
		v1.AddArg2(destptr, v2)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(s / 8)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = int64ToAuxInt(0)
		v.AddArg4(destptr, v0, v1, mem)
		return true
	}
	return false
}
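// Editor's sketch (illustrative, not generated code): the (Zero ...) rules
// above choose a lowering strategy by size. The helper below is the editor's
// own summary of those thresholds, simplified to ignore the remainder-peeling
// recursion; the function name is hypothetical and nothing in this package
// calls it.
func zeroLoweringStrategySketch(s int64, useSSE, noDuffDevice bool) string {
	switch {
	case useSSE && s <= 64, !useSSE && s <= 32:
		// Small blocks: unrolled MOVB/MOVW/MOVL/MOVQ/MOVO constant stores.
		return "unrolled stores"
	case s <= 1024 && s%16 == 0 && !noDuffDevice:
		// Medium blocks: jump into a pre-built run of 16-byte zero stores.
		return "DUFFZERO (Duff's device)"
	default:
		// Large or irregular blocks: REP STOSQ writing s/8 zero quadwords.
		return "REPSTOSQ"
	}
}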
func rewriteBlockAMD64(b *Block) bool {
	typ := &b.Func.Config.Types
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// result: (UGE (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLL {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// result: (UGE (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLQ {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(int64(c))
		// result: (UGE (BTLconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTLconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint32PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64UGE, v0)
			return true
		}
		// match: (EQ (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(int64(c))
		// result: (UGE (BTQconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint64PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64UGE, v0)
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c)
		// result: (UGE (BTQconst [int8(log64(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64MOVQconst {
					continue
				}
				c := auxIntToInt64(v_0_0.AuxInt)
				x := v_0_1
				if !(isUint64PowerOfTwo(c)) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(int8(log64(c)))
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
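		// Editor's note (illustrative, not generated): the rules above turn a
		// single-bit test into a BT instruction. Go source such as
		//
		//	if x&(1<<k) == 0 { ... }
		//
		// arrives here as (EQ (TESTQ (SHLQ (MOVQconst [1]) k) x)). BT copies
		// the selected bit into the carry flag, and the UGE block branches to
		// yes exactly when the carry flag is clear, i.e. the bit is zero.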
		// match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2
		// result: (UGE (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
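		// Editor's note (illustrative, not generated): in the z1 patterns
		// above, (SHLQconst [63] (SHRQconst [63] x)) isolates the sign bit of
		// x, and (SHRQconst [63] (SHLQconst [63] x)) isolates bit 0, so the
		// self-test reduces to a single BTQconst/BTLconst of that bit.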
		// match: (EQ (InvertFlags cmp) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64EQ, cmp)
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
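		// Editor's note (illustrative, not generated): Flag* operands encode
		// comparison outcomes known at compile time, so the conditional block
		// folds to an unconditional (First ...): FlagEQ satisfies EQ and
		// keeps the yes edge first, while the LT/GT flag states fail it and
		// swapSuccessors() sends control to the no edge.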
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64LE, cmp)
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64LT, cmp)
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
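		// Editor's note (illustrative, not generated): InvertFlags records
		// that the comparison's operands were swapped, so each condition maps
		// to its mirror rather than its negation: EQ and NE stay put, GE
		// becomes LE, GT becomes LT, and likewise for the unsigned variants.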
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETL {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64LT, cmp)
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETLE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64LE, cmp)
			return true
		}
		// match: (If (SETG cmp) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETG {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64GT, cmp)
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64GE, cmp)
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64SETEQ {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64EQ, cmp)
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETNE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64NE, cmp)
			return true
		}
		// match: (If (SETB cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETB {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULT, cmp)
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETBE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULE, cmp)
			return true
		}
		// match: (If (SETA cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETA {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETAE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (If (SETO cmp) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64SETO {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64OS, cmp)
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGEF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64SETEQF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64EQF, cmp)
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64SETNEF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64NEF, cmp)
			return true
		}
		// match: (If cond yes no)
		// result: (NE (TESTB cond cond) yes no)
		for {
			cond := b.Controls[0]
			v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg2(cond, cond)
			b.resetWithControl(BlockAMD64NE, v0)
			return true
		}
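		// Editor's note (illustrative, not generated): an If controlled by a
		// materialized condition byte (SETcc) branches on the underlying
		// flags directly. The final catch-all handles any other boolean:
		// (TESTB cond cond) sets ZF from the 0/1 byte, so the NE block takes
		// the yes edge exactly when cond is nonzero.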
	case BlockJumpTable:
		// match: (JumpTable idx)
		// result: (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
		for {
			idx := b.Controls[0]
			v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
			v0.Aux = symToAux(makeJumpTableSym(b))
			v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
			v0.AddArg(v1)
			b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
			b.Aux = symToAux(makeJumpTableSym(b))
			return true
		}
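		// Editor's note (illustrative assumption): the jump-table lowering
		// materializes the table address with a PC-relative LEAQ of the
		// per-block symbol makeJumpTableSym(b) off the static base (SB), and
		// the JUMPTABLE block then dispatches through the idx'th entry.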
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64GE, cmp)
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64GT, cmp)
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64LT, cmp)
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64LE, cmp)
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64GT, cmp)
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64GE, cmp)
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64EQ, cmp)
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64NE, cmp)
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64ULT, cmp)
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64ULE, cmp)
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64OS, cmp)
			return true
		}
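		// Editor's note (illustrative, not generated): these TESTB pairs undo
		// an earlier materialization. Branching on (SETL cmp) != 0 is the
		// same as branching on cmp's "less than" outcome, so the SETcc byte
		// and its self-test are dropped and the block uses cmp's flags.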
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// result: (ULT (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLL {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// result: (ULT (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLQ {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(int64(c))
		// result: (ULT (BTLconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTLconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint32PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64ULT, v0)
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(int64(c))
		// result: (ULT (BTQconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint64PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64ULT, v0)
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c)
		// result: (ULT (BTQconst [int8(log64(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64MOVQconst {
					continue
				}
				c := auxIntToInt64(v_0_0.AuxInt)
				x := v_0_1
				if !(isUint64PowerOfTwo(c)) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(int8(log64(c)))
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2
		// result: (ULT (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
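		// Editor's note (illustrative, not generated): the bit-test rules
		// above mirror the EQ case earlier in this switch, using ULT (carry
		// set, i.e. the selected bit is 1) in place of UGE.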
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64EQF, cmp)
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64NEF, cmp)
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64NE, cmp)
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
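		// Editor's note (illustrative assumption): SETGF/SETGEF come from
		// floating-point compares, where UCOMISS/UCOMISD report the result in
		// CF/ZF/PF like an unsigned compare, so "greater" maps to the
		// unsigned UGT/UGE blocks; EQF and NEF are separate block kinds that
		// also consult the parity flag for unordered (NaN) operands.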
	case BlockAMD64UGE:
		// match: (UGE (TESTQ x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTL x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTW x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTW {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTB x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (InvertFlags cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULE, cmp)
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULT, cmp)
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
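		// Editor's note (illustrative, not generated): TEST of a register
		// against itself never sets the carry flag, so the (UGE (TESTx x x))
		// rules above are always taken and the (ULT (TESTx x x)) rules below
		// never are; both fold to First, the latter with successors swapped.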
	case BlockAMD64ULT:
		// match: (ULT (TESTQ x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTL x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTW x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTW {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTB x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (InvertFlags cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	}
	return false
}