github.com/bir3/gocompiler@v0.9.2202/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from _gen/AMD64.rules using 'go generate'; DO NOT EDIT.

package ssa

import "github.com/bir3/gocompiler/src/internal/buildcfg"
import "math"
import "github.com/bir3/gocompiler/src/cmd/internal/obj"
import "github.com/bir3/gocompiler/src/cmd/compile/internal/types"
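// Editorial note (not in the generated source): rewriteValueAMD64 is the
// per-value entry point of the AMD64 lowering/rewrite pass. It dispatches on
// v.Op to one generated rewrite function per opcode. For many generic ops the
// lowering is a pure opcode substitution, so the case simply mutates v.Op in
// place and returns true without touching the operands.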
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDNL:
		return rewriteValueAMD64_OpAMD64ANDNL(v)
	case OpAMD64ANDNQ:
		return rewriteValueAMD64_OpAMD64ANDNQ(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BSWAPL:
		return rewriteValueAMD64_OpAMD64BSWAPL(v)
	case OpAMD64BSWAPQ:
		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBELstore:
		return rewriteValueAMD64_OpAMD64MOVBELstore(v)
	case OpAMD64MOVBEQstore:
		return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
	case OpAMD64MOVBEWstore:
		return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVOstoreconst:
		return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SARXLload:
		return rewriteValueAMD64_OpAMD64SARXLload(v)
	case OpAMD64SARXQload:
		return rewriteValueAMD64_OpAMD64SARXQload(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHLXLload:
		return rewriteValueAMD64_OpAMD64SHLXLload(v)
	case OpAMD64SHLXQload:
		return rewriteValueAMD64_OpAMD64SHLXQload(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SHRXLload:
		return rewriteValueAMD64_OpAMD64SHRXLload(v)
	case OpAMD64SHRXQload:
		return rewriteValueAMD64_OpAMD64SHRXQload(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap16:
		return rewriteValueAMD64_OpBswap16(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero(v)
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMax32F:
		return rewriteValueAMD64_OpMax32F(v)
	case OpMax64F:
		return rewriteValueAMD64_OpMax64F(v)
	case OpMin32F:
		return rewriteValueAMD64_OpMin32F(v)
	case OpMin64F:
		return rewriteValueAMD64_OpMin64F(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpPrefetchCache:
		v.Op = OpAMD64PrefetchT0
		return true
	case OpPrefetchCacheStreamed:
		v.Op = OpAMD64PrefetchNTA
		return true
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSelectN:
		return rewriteValueAMD64_OpSelectN(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpSqrt32:
		v.Op = OpAMD64SQRTSS
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTailCall:
		v.Op = OpAMD64CALLtail
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
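// Editorial note (not in the generated source): each function below handles
// one opcode. The // match:, // cond:, and // result: comments are carried
// over from the source rule in _gen/AMD64.rules: the code first structurally
// matches the pattern, then checks the side condition, and finally calls
// v.reset to rewrite the value in place. For commutative operations the
// generated _i0 loop retries the match with the two operands swapped;
// `continue` moves on to the next operand order, `break` gives up on the rule.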
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
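// Editorial note (not in the generated source): ADDLconst below folds a
// constant addition into LEA-style address arithmetic where it can, drops
// additions of zero, merges adjacent constants, and turns an add to SP into
// an explicit LEAL.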
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
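// Editorial note (not in the generated source): the *constmodify ops carry a
// ValAndOff in their AuxInt, a constant value and a memory offset packed into
// one int64. canAdd32 checks that folding the extra offset into the packed
// offset field cannot overflow it before the rewrite commits.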
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
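// Editorial note (not in the generated source): the *load ops fuse a memory
// operand into an ALU instruction. The rules below fold an ADDQconst or LEAQ
// address computation into the load's offset and symbol, guarded by is32Bit
// so the combined displacement still fits in a signed 32-bit immediate, and
// by canMergeSym so that at most one symbol survives the merge.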
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst <t> [c]))
	// cond: is32Bit(c) && !t.IsPtr()
	// result: (ADDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			t := v_1.Type
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c) && !t.IsPtr()) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
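	// Editorial note (not in the generated source): the next three rules are
	// strength reduction. x + (y << k) for k = 3, 2, 1 becomes a single
	// LEAQ8/LEAQ4/LEAQ2, computing x + y*8, x + y*4, or x + y*2 in one
	// instruction without clobbering the flags.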
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
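// rewriteValueAMD64_OpAMD64ADDQload folds pointer arithmetic into the load's
// offset/symbol and forwards a value just stored from an XMM register via
// MOVQf2i rather than through memory.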
func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
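// rewriteValueAMD64_OpAMD64ADDSD merges a clobberable MOVSDload operand into
// the add, producing ADDSDload.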
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
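// rewriteValueAMD64_OpAMD64ADDSS is the float32 analogue of the ADDSD rules:
// a clobberable MOVSSload operand becomes ADDSSload.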
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
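// rewriteValueAMD64_OpAMD64ANDL simplifies 32-bit ANDs: bit-clear masks
// become BTRL, constant operands become ANDLconst, x&x collapses to x,
// clobberable loads merge into ANDLload, and with GOAMD64 >= 3 the BMI forms
// ANDNL, BLSIL, and BLSRL apply.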
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTL {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ANDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDL x (NOTL y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTL {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (NEGL x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIL)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL <t> x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRL x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: c==0
	// result: (MOVLconst [0])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: c==-1
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == -1) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		return true
	}
	return false
}
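// rewriteValueAMD64_OpAMD64ANDLconstmodify folds ADDQconst and LEAQ
// base-address arithmetic into the read-modify-write's ValAndOff aux.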
func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
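// rewriteValueAMD64_OpAMD64ANDLmodify folds ADDQconst and LEAQ base-address
// arithmetic into the memory operand's offset and symbol.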
func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNL x (SHLL (MOVLconst [1]) y))
	// result: (BTRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
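// rewriteValueAMD64_OpAMD64ANDQ mirrors the ANDL rules at 64 bits and
// additionally turns AND with the complement of a large power of two into
// BTRQconst.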
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTQ {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
	// result: (BTRQconst [int8(log64(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTRQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ANDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDQ x (NOTQ y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTQ {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ x (NEGQ x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIQ)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ <t> x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRQ x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
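// rewriteValueAMD64_OpAMD64ANDQconst folds nested and constant ANDs and
// recognizes the masks 0xFF and 0xFFFF as the zero-extensions MOVBQZX and
// MOVWQZX.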
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
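// rewriteValueAMD64_OpAMD64ANDQload folds address arithmetic into the load's
// offset/symbol and forwards a just-stored XMM value through MOVQf2i.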
func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
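// rewriteValueAMD64_OpAMD64BSFQ moves the ORQconst guard bit (1<<8 or 1<<16)
// past a zero-extension so the bit search operates on the unextended value.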
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPL (BSWAPL p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPL {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPL x:(MOVLload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBELload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPL x:(MOVBELload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVLload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBELload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPQ (BSWAPQ p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPQ {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBEQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPQ x:(MOVBEQload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBEQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
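// rewriteValueAMD64_OpAMD64BTCQconst evaluates a bit-complement of a known
// constant at compile time.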
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d)<32
	// result: (BTLconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [0] s:(SHRXL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRXL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
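// rewriteValueAMD64_OpAMD64BTQconst folds constant shifts into the tested bit
// index and rewrites a bit-0 test of a variable right shift as BTQ with
// swapped operands.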
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTQconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
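// rewriteValueAMD64_OpAMD64CMOVLCC flips the condition to CMOVLLS under
// InvertFlags and, when the flags are statically known, picks one argument
// outright.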
func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
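// rewriteValueAMD64_OpAMD64CMOVLEQ resolves statically known flags and reuses
// the flag output of BLSRQ/BLSRL in place of a separate TEST of its value
// output.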
func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVLEQ x y (InvertFlags cond))
	// result: (CMOVLEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVLEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGE x y (InvertFlags cond))
	// result: (CMOVLLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
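// rewriteValueAMD64_OpAMD64CMOVLGT flips the condition to CMOVLLT under
// InvertFlags and folds statically known flag values.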
func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGT x y (InvertFlags cond))
	// result: (CMOVLLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLHI x y (InvertFlags cond))
	// result: (CMOVLCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLE x y (InvertFlags cond))
	// result: (CMOVLGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
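// rewriteValueAMD64_OpAMD64CMOVLLS flips the condition to CMOVLCC under
// InvertFlags and folds statically known flag values.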
func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLS x y (InvertFlags cond))
	// result: (CMOVLCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLT x y (InvertFlags cond))
	// result: (CMOVLGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
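// rewriteValueAMD64_OpAMD64CMOVLNE resolves statically known flags and, like
// CMOVLEQ, consumes the flags produced by BLSRQ/BLSRL directly.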
4589 } 4590 // match: (CMOVLNE _ x (FlagGT_ULT)) 4591 // result: x 4592 for { 4593 x := v_1 4594 if v_2.Op != OpAMD64FlagGT_ULT { 4595 break 4596 } 4597 v.copyOf(x) 4598 return true 4599 } 4600 // match: (CMOVLNE _ x (FlagLT_ULT)) 4601 // result: x 4602 for { 4603 x := v_1 4604 if v_2.Op != OpAMD64FlagLT_ULT { 4605 break 4606 } 4607 v.copyOf(x) 4608 return true 4609 } 4610 // match: (CMOVLNE _ x (FlagLT_UGT)) 4611 // result: x 4612 for { 4613 x := v_1 4614 if v_2.Op != OpAMD64FlagLT_UGT { 4615 break 4616 } 4617 v.copyOf(x) 4618 return true 4619 } 4620 // match: (CMOVLNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) 4621 // result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr)) 4622 for { 4623 x := v_0 4624 y := v_1 4625 if v_2.Op != OpAMD64TESTQ { 4626 break 4627 } 4628 _ = v_2.Args[1] 4629 v_2_0 := v_2.Args[0] 4630 v_2_1 := v_2.Args[1] 4631 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { 4632 s := v_2_0 4633 if s.Op != OpSelect0 { 4634 continue 4635 } 4636 blsr := s.Args[0] 4637 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 { 4638 continue 4639 } 4640 v.reset(OpAMD64CMOVLNE) 4641 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 4642 v0.AddArg(blsr) 4643 v.AddArg3(x, y, v0) 4644 return true 4645 } 4646 break 4647 } 4648 // match: (CMOVLNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s)) 4649 // result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr)) 4650 for { 4651 x := v_0 4652 y := v_1 4653 if v_2.Op != OpAMD64TESTL { 4654 break 4655 } 4656 _ = v_2.Args[1] 4657 v_2_0 := v_2.Args[0] 4658 v_2_1 := v_2.Args[1] 4659 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { 4660 s := v_2_0 4661 if s.Op != OpSelect0 { 4662 continue 4663 } 4664 blsr := s.Args[0] 4665 if blsr.Op != OpAMD64BLSRL || s != v_2_1 { 4666 continue 4667 } 4668 v.reset(OpAMD64CMOVLNE) 4669 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 4670 v0.AddArg(blsr) 4671 v.AddArg3(x, y, v0) 4672 return true 4673 } 4674 break 4675 } 4676 return false 4677 } 4678 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { 4679 v_2 := v.Args[2] 4680 v_1 := v.Args[1] 4681 v_0 := v.Args[0] 4682 // match: (CMOVQCC x y (InvertFlags cond)) 4683 // result: (CMOVQLS x y cond) 4684 for { 4685 x := v_0 4686 y := v_1 4687 if v_2.Op != OpAMD64InvertFlags { 4688 break 4689 } 4690 cond := v_2.Args[0] 4691 v.reset(OpAMD64CMOVQLS) 4692 v.AddArg3(x, y, cond) 4693 return true 4694 } 4695 // match: (CMOVQCC _ x (FlagEQ)) 4696 // result: x 4697 for { 4698 x := v_1 4699 if v_2.Op != OpAMD64FlagEQ { 4700 break 4701 } 4702 v.copyOf(x) 4703 return true 4704 } 4705 // match: (CMOVQCC _ x (FlagGT_UGT)) 4706 // result: x 4707 for { 4708 x := v_1 4709 if v_2.Op != OpAMD64FlagGT_UGT { 4710 break 4711 } 4712 v.copyOf(x) 4713 return true 4714 } 4715 // match: (CMOVQCC y _ (FlagGT_ULT)) 4716 // result: y 4717 for { 4718 y := v_0 4719 if v_2.Op != OpAMD64FlagGT_ULT { 4720 break 4721 } 4722 v.copyOf(y) 4723 return true 4724 } 4725 // match: (CMOVQCC y _ (FlagLT_ULT)) 4726 // result: y 4727 for { 4728 y := v_0 4729 if v_2.Op != OpAMD64FlagLT_ULT { 4730 break 4731 } 4732 v.copyOf(y) 4733 return true 4734 } 4735 // match: (CMOVQCC _ x (FlagLT_UGT)) 4736 // result: x 4737 for { 4738 x := v_1 4739 if v_2.Op != OpAMD64FlagLT_UGT { 4740 break 4741 } 4742 v.copyOf(x) 4743 return true 4744 } 4745 return false 4746 } 4747 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { 4748 v_2 := v.Args[2] 4749 v_1 := v.Args[1] 4750 v_0 := v.Args[0] 4751 // match: (CMOVQCS x y (InvertFlags cond)) 4752 // result: (CMOVQHI x y cond) 4753 for { 4754 x := v_0 4755 y := v_1 
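// The Flag* cases in these functions fold CMOVs whose flags are
// statically known. Each Flag* op encodes a (signed, unsigned) outcome
// pair; FlagLT_UGT, for example, means "less than when signed, greater
// than when unsigned". Each rewrite keeps whichever argument the
// condition selects and discards the other.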
4756 if v_2.Op != OpAMD64InvertFlags { 4757 break 4758 } 4759 cond := v_2.Args[0] 4760 v.reset(OpAMD64CMOVQHI) 4761 v.AddArg3(x, y, cond) 4762 return true 4763 } 4764 // match: (CMOVQCS y _ (FlagEQ)) 4765 // result: y 4766 for { 4767 y := v_0 4768 if v_2.Op != OpAMD64FlagEQ { 4769 break 4770 } 4771 v.copyOf(y) 4772 return true 4773 } 4774 // match: (CMOVQCS y _ (FlagGT_UGT)) 4775 // result: y 4776 for { 4777 y := v_0 4778 if v_2.Op != OpAMD64FlagGT_UGT { 4779 break 4780 } 4781 v.copyOf(y) 4782 return true 4783 } 4784 // match: (CMOVQCS _ x (FlagGT_ULT)) 4785 // result: x 4786 for { 4787 x := v_1 4788 if v_2.Op != OpAMD64FlagGT_ULT { 4789 break 4790 } 4791 v.copyOf(x) 4792 return true 4793 } 4794 // match: (CMOVQCS _ x (FlagLT_ULT)) 4795 // result: x 4796 for { 4797 x := v_1 4798 if v_2.Op != OpAMD64FlagLT_ULT { 4799 break 4800 } 4801 v.copyOf(x) 4802 return true 4803 } 4804 // match: (CMOVQCS y _ (FlagLT_UGT)) 4805 // result: y 4806 for { 4807 y := v_0 4808 if v_2.Op != OpAMD64FlagLT_UGT { 4809 break 4810 } 4811 v.copyOf(y) 4812 return true 4813 } 4814 return false 4815 } 4816 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { 4817 v_2 := v.Args[2] 4818 v_1 := v.Args[1] 4819 v_0 := v.Args[0] 4820 b := v.Block 4821 // match: (CMOVQEQ x y (InvertFlags cond)) 4822 // result: (CMOVQEQ x y cond) 4823 for { 4824 x := v_0 4825 y := v_1 4826 if v_2.Op != OpAMD64InvertFlags { 4827 break 4828 } 4829 cond := v_2.Args[0] 4830 v.reset(OpAMD64CMOVQEQ) 4831 v.AddArg3(x, y, cond) 4832 return true 4833 } 4834 // match: (CMOVQEQ _ x (FlagEQ)) 4835 // result: x 4836 for { 4837 x := v_1 4838 if v_2.Op != OpAMD64FlagEQ { 4839 break 4840 } 4841 v.copyOf(x) 4842 return true 4843 } 4844 // match: (CMOVQEQ y _ (FlagGT_UGT)) 4845 // result: y 4846 for { 4847 y := v_0 4848 if v_2.Op != OpAMD64FlagGT_UGT { 4849 break 4850 } 4851 v.copyOf(y) 4852 return true 4853 } 4854 // match: (CMOVQEQ y _ (FlagGT_ULT)) 4855 // result: y 4856 for { 4857 y := v_0 4858 if v_2.Op != OpAMD64FlagGT_ULT { 4859 break 4860 } 4861 v.copyOf(y) 4862 return true 4863 } 4864 // match: (CMOVQEQ y _ (FlagLT_ULT)) 4865 // result: y 4866 for { 4867 y := v_0 4868 if v_2.Op != OpAMD64FlagLT_ULT { 4869 break 4870 } 4871 v.copyOf(y) 4872 return true 4873 } 4874 // match: (CMOVQEQ y _ (FlagLT_UGT)) 4875 // result: y 4876 for { 4877 y := v_0 4878 if v_2.Op != OpAMD64FlagLT_UGT { 4879 break 4880 } 4881 v.copyOf(y) 4882 return true 4883 } 4884 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) 4885 // cond: c != 0 4886 // result: x 4887 for { 4888 x := v_0 4889 if v_2.Op != OpSelect1 { 4890 break 4891 } 4892 v_2_0 := v_2.Args[0] 4893 if v_2_0.Op != OpAMD64BSFQ { 4894 break 4895 } 4896 v_2_0_0 := v_2_0.Args[0] 4897 if v_2_0_0.Op != OpAMD64ORQconst { 4898 break 4899 } 4900 c := auxIntToInt32(v_2_0_0.AuxInt) 4901 if !(c != 0) { 4902 break 4903 } 4904 v.copyOf(x) 4905 return true 4906 } 4907 // match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _)))) 4908 // cond: c != 0 4909 // result: x 4910 for { 4911 x := v_0 4912 if v_2.Op != OpSelect1 { 4913 break 4914 } 4915 v_2_0 := v_2.Args[0] 4916 if v_2_0.Op != OpAMD64BSRQ { 4917 break 4918 } 4919 v_2_0_0 := v_2_0.Args[0] 4920 if v_2_0_0.Op != OpAMD64ORQconst { 4921 break 4922 } 4923 c := auxIntToInt32(v_2_0_0.AuxInt) 4924 if !(c != 0) { 4925 break 4926 } 4927 v.copyOf(x) 4928 return true 4929 } 4930 // match: (CMOVQEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) 4931 // result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr)) 4932 for { 4933 x := v_0 4934 y := v_1 4935 if v_2.Op != OpAMD64TESTQ { 4936 break 
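// This rule fuses TESTQ s s, where s = Select0 (BLSRQ x) = x&(x-1),
// with the flags BLSRQ already produces: its flag result (Select1)
// reflects whether x&(x-1) is zero, so the separate TEST is redundant.
// The _i0 loop below tries both operand orders, TESTQ being commutative.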
4937 } 4938 _ = v_2.Args[1] 4939 v_2_0 := v_2.Args[0] 4940 v_2_1 := v_2.Args[1] 4941 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { 4942 s := v_2_0 4943 if s.Op != OpSelect0 { 4944 continue 4945 } 4946 blsr := s.Args[0] 4947 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 { 4948 continue 4949 } 4950 v.reset(OpAMD64CMOVQEQ) 4951 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 4952 v0.AddArg(blsr) 4953 v.AddArg3(x, y, v0) 4954 return true 4955 } 4956 break 4957 } 4958 // match: (CMOVQEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s)) 4959 // result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr)) 4960 for { 4961 x := v_0 4962 y := v_1 4963 if v_2.Op != OpAMD64TESTL { 4964 break 4965 } 4966 _ = v_2.Args[1] 4967 v_2_0 := v_2.Args[0] 4968 v_2_1 := v_2.Args[1] 4969 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { 4970 s := v_2_0 4971 if s.Op != OpSelect0 { 4972 continue 4973 } 4974 blsr := s.Args[0] 4975 if blsr.Op != OpAMD64BLSRL || s != v_2_1 { 4976 continue 4977 } 4978 v.reset(OpAMD64CMOVQEQ) 4979 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 4980 v0.AddArg(blsr) 4981 v.AddArg3(x, y, v0) 4982 return true 4983 } 4984 break 4985 } 4986 return false 4987 } 4988 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { 4989 v_2 := v.Args[2] 4990 v_1 := v.Args[1] 4991 v_0 := v.Args[0] 4992 // match: (CMOVQGE x y (InvertFlags cond)) 4993 // result: (CMOVQLE x y cond) 4994 for { 4995 x := v_0 4996 y := v_1 4997 if v_2.Op != OpAMD64InvertFlags { 4998 break 4999 } 5000 cond := v_2.Args[0] 5001 v.reset(OpAMD64CMOVQLE) 5002 v.AddArg3(x, y, cond) 5003 return true 5004 } 5005 // match: (CMOVQGE _ x (FlagEQ)) 5006 // result: x 5007 for { 5008 x := v_1 5009 if v_2.Op != OpAMD64FlagEQ { 5010 break 5011 } 5012 v.copyOf(x) 5013 return true 5014 } 5015 // match: (CMOVQGE _ x (FlagGT_UGT)) 5016 // result: x 5017 for { 5018 x := v_1 5019 if v_2.Op != OpAMD64FlagGT_UGT { 5020 break 5021 } 5022 v.copyOf(x) 5023 return true 5024 } 5025 // match: (CMOVQGE _ x (FlagGT_ULT)) 5026 // result: x 5027 for { 5028 x := v_1 5029 if v_2.Op != OpAMD64FlagGT_ULT { 5030 break 5031 } 5032 v.copyOf(x) 5033 return true 5034 } 5035 // match: (CMOVQGE y _ (FlagLT_ULT)) 5036 // result: y 5037 for { 5038 y := v_0 5039 if v_2.Op != OpAMD64FlagLT_ULT { 5040 break 5041 } 5042 v.copyOf(y) 5043 return true 5044 } 5045 // match: (CMOVQGE y _ (FlagLT_UGT)) 5046 // result: y 5047 for { 5048 y := v_0 5049 if v_2.Op != OpAMD64FlagLT_UGT { 5050 break 5051 } 5052 v.copyOf(y) 5053 return true 5054 } 5055 return false 5056 } 5057 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { 5058 v_2 := v.Args[2] 5059 v_1 := v.Args[1] 5060 v_0 := v.Args[0] 5061 // match: (CMOVQGT x y (InvertFlags cond)) 5062 // result: (CMOVQLT x y cond) 5063 for { 5064 x := v_0 5065 y := v_1 5066 if v_2.Op != OpAMD64InvertFlags { 5067 break 5068 } 5069 cond := v_2.Args[0] 5070 v.reset(OpAMD64CMOVQLT) 5071 v.AddArg3(x, y, cond) 5072 return true 5073 } 5074 // match: (CMOVQGT y _ (FlagEQ)) 5075 // result: y 5076 for { 5077 y := v_0 5078 if v_2.Op != OpAMD64FlagEQ { 5079 break 5080 } 5081 v.copyOf(y) 5082 return true 5083 } 5084 // match: (CMOVQGT _ x (FlagGT_UGT)) 5085 // result: x 5086 for { 5087 x := v_1 5088 if v_2.Op != OpAMD64FlagGT_UGT { 5089 break 5090 } 5091 v.copyOf(x) 5092 return true 5093 } 5094 // match: (CMOVQGT _ x (FlagGT_ULT)) 5095 // result: x 5096 for { 5097 x := v_1 5098 if v_2.Op != OpAMD64FlagGT_ULT { 5099 break 5100 } 5101 v.copyOf(x) 5102 return true 5103 } 5104 // match: (CMOVQGT y _ (FlagLT_ULT)) 5105 // result: y 5106 
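// GT is false on FlagEQ and on both FlagLT_* values, so those cases
// keep y (the not-taken arm); only the FlagGT_* cases select x.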
for { 5107 y := v_0 5108 if v_2.Op != OpAMD64FlagLT_ULT { 5109 break 5110 } 5111 v.copyOf(y) 5112 return true 5113 } 5114 // match: (CMOVQGT y _ (FlagLT_UGT)) 5115 // result: y 5116 for { 5117 y := v_0 5118 if v_2.Op != OpAMD64FlagLT_UGT { 5119 break 5120 } 5121 v.copyOf(y) 5122 return true 5123 } 5124 return false 5125 } 5126 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { 5127 v_2 := v.Args[2] 5128 v_1 := v.Args[1] 5129 v_0 := v.Args[0] 5130 // match: (CMOVQHI x y (InvertFlags cond)) 5131 // result: (CMOVQCS x y cond) 5132 for { 5133 x := v_0 5134 y := v_1 5135 if v_2.Op != OpAMD64InvertFlags { 5136 break 5137 } 5138 cond := v_2.Args[0] 5139 v.reset(OpAMD64CMOVQCS) 5140 v.AddArg3(x, y, cond) 5141 return true 5142 } 5143 // match: (CMOVQHI y _ (FlagEQ)) 5144 // result: y 5145 for { 5146 y := v_0 5147 if v_2.Op != OpAMD64FlagEQ { 5148 break 5149 } 5150 v.copyOf(y) 5151 return true 5152 } 5153 // match: (CMOVQHI _ x (FlagGT_UGT)) 5154 // result: x 5155 for { 5156 x := v_1 5157 if v_2.Op != OpAMD64FlagGT_UGT { 5158 break 5159 } 5160 v.copyOf(x) 5161 return true 5162 } 5163 // match: (CMOVQHI y _ (FlagGT_ULT)) 5164 // result: y 5165 for { 5166 y := v_0 5167 if v_2.Op != OpAMD64FlagGT_ULT { 5168 break 5169 } 5170 v.copyOf(y) 5171 return true 5172 } 5173 // match: (CMOVQHI y _ (FlagLT_ULT)) 5174 // result: y 5175 for { 5176 y := v_0 5177 if v_2.Op != OpAMD64FlagLT_ULT { 5178 break 5179 } 5180 v.copyOf(y) 5181 return true 5182 } 5183 // match: (CMOVQHI _ x (FlagLT_UGT)) 5184 // result: x 5185 for { 5186 x := v_1 5187 if v_2.Op != OpAMD64FlagLT_UGT { 5188 break 5189 } 5190 v.copyOf(x) 5191 return true 5192 } 5193 return false 5194 } 5195 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { 5196 v_2 := v.Args[2] 5197 v_1 := v.Args[1] 5198 v_0 := v.Args[0] 5199 // match: (CMOVQLE x y (InvertFlags cond)) 5200 // result: (CMOVQGE x y cond) 5201 for { 5202 x := v_0 5203 y := v_1 5204 if v_2.Op != OpAMD64InvertFlags { 5205 break 5206 } 5207 cond := v_2.Args[0] 5208 v.reset(OpAMD64CMOVQGE) 5209 v.AddArg3(x, y, cond) 5210 return true 5211 } 5212 // match: (CMOVQLE _ x (FlagEQ)) 5213 // result: x 5214 for { 5215 x := v_1 5216 if v_2.Op != OpAMD64FlagEQ { 5217 break 5218 } 5219 v.copyOf(x) 5220 return true 5221 } 5222 // match: (CMOVQLE y _ (FlagGT_UGT)) 5223 // result: y 5224 for { 5225 y := v_0 5226 if v_2.Op != OpAMD64FlagGT_UGT { 5227 break 5228 } 5229 v.copyOf(y) 5230 return true 5231 } 5232 // match: (CMOVQLE y _ (FlagGT_ULT)) 5233 // result: y 5234 for { 5235 y := v_0 5236 if v_2.Op != OpAMD64FlagGT_ULT { 5237 break 5238 } 5239 v.copyOf(y) 5240 return true 5241 } 5242 // match: (CMOVQLE _ x (FlagLT_ULT)) 5243 // result: x 5244 for { 5245 x := v_1 5246 if v_2.Op != OpAMD64FlagLT_ULT { 5247 break 5248 } 5249 v.copyOf(x) 5250 return true 5251 } 5252 // match: (CMOVQLE _ x (FlagLT_UGT)) 5253 // result: x 5254 for { 5255 x := v_1 5256 if v_2.Op != OpAMD64FlagLT_UGT { 5257 break 5258 } 5259 v.copyOf(x) 5260 return true 5261 } 5262 return false 5263 } 5264 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { 5265 v_2 := v.Args[2] 5266 v_1 := v.Args[1] 5267 v_0 := v.Args[0] 5268 // match: (CMOVQLS x y (InvertFlags cond)) 5269 // result: (CMOVQCC x y cond) 5270 for { 5271 x := v_0 5272 y := v_1 5273 if v_2.Op != OpAMD64InvertFlags { 5274 break 5275 } 5276 cond := v_2.Args[0] 5277 v.reset(OpAMD64CMOVQCC) 5278 v.AddArg3(x, y, cond) 5279 return true 5280 } 5281 // match: (CMOVQLS _ x (FlagEQ)) 5282 // result: x 5283 for { 5284 x := v_1 5285 if v_2.Op != OpAMD64FlagEQ { 5286 break 5287 } 5288 
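// v.copyOf(x) turns v into a copy of x; once later passes forward
// copies, the CMOV vanishes entirely.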
v.copyOf(x) 5289 return true 5290 } 5291 // match: (CMOVQLS y _ (FlagGT_UGT)) 5292 // result: y 5293 for { 5294 y := v_0 5295 if v_2.Op != OpAMD64FlagGT_UGT { 5296 break 5297 } 5298 v.copyOf(y) 5299 return true 5300 } 5301 // match: (CMOVQLS _ x (FlagGT_ULT)) 5302 // result: x 5303 for { 5304 x := v_1 5305 if v_2.Op != OpAMD64FlagGT_ULT { 5306 break 5307 } 5308 v.copyOf(x) 5309 return true 5310 } 5311 // match: (CMOVQLS _ x (FlagLT_ULT)) 5312 // result: x 5313 for { 5314 x := v_1 5315 if v_2.Op != OpAMD64FlagLT_ULT { 5316 break 5317 } 5318 v.copyOf(x) 5319 return true 5320 } 5321 // match: (CMOVQLS y _ (FlagLT_UGT)) 5322 // result: y 5323 for { 5324 y := v_0 5325 if v_2.Op != OpAMD64FlagLT_UGT { 5326 break 5327 } 5328 v.copyOf(y) 5329 return true 5330 } 5331 return false 5332 } 5333 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { 5334 v_2 := v.Args[2] 5335 v_1 := v.Args[1] 5336 v_0 := v.Args[0] 5337 // match: (CMOVQLT x y (InvertFlags cond)) 5338 // result: (CMOVQGT x y cond) 5339 for { 5340 x := v_0 5341 y := v_1 5342 if v_2.Op != OpAMD64InvertFlags { 5343 break 5344 } 5345 cond := v_2.Args[0] 5346 v.reset(OpAMD64CMOVQGT) 5347 v.AddArg3(x, y, cond) 5348 return true 5349 } 5350 // match: (CMOVQLT y _ (FlagEQ)) 5351 // result: y 5352 for { 5353 y := v_0 5354 if v_2.Op != OpAMD64FlagEQ { 5355 break 5356 } 5357 v.copyOf(y) 5358 return true 5359 } 5360 // match: (CMOVQLT y _ (FlagGT_UGT)) 5361 // result: y 5362 for { 5363 y := v_0 5364 if v_2.Op != OpAMD64FlagGT_UGT { 5365 break 5366 } 5367 v.copyOf(y) 5368 return true 5369 } 5370 // match: (CMOVQLT y _ (FlagGT_ULT)) 5371 // result: y 5372 for { 5373 y := v_0 5374 if v_2.Op != OpAMD64FlagGT_ULT { 5375 break 5376 } 5377 v.copyOf(y) 5378 return true 5379 } 5380 // match: (CMOVQLT _ x (FlagLT_ULT)) 5381 // result: x 5382 for { 5383 x := v_1 5384 if v_2.Op != OpAMD64FlagLT_ULT { 5385 break 5386 } 5387 v.copyOf(x) 5388 return true 5389 } 5390 // match: (CMOVQLT _ x (FlagLT_UGT)) 5391 // result: x 5392 for { 5393 x := v_1 5394 if v_2.Op != OpAMD64FlagLT_UGT { 5395 break 5396 } 5397 v.copyOf(x) 5398 return true 5399 } 5400 return false 5401 } 5402 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { 5403 v_2 := v.Args[2] 5404 v_1 := v.Args[1] 5405 v_0 := v.Args[0] 5406 b := v.Block 5407 // match: (CMOVQNE x y (InvertFlags cond)) 5408 // result: (CMOVQNE x y cond) 5409 for { 5410 x := v_0 5411 y := v_1 5412 if v_2.Op != OpAMD64InvertFlags { 5413 break 5414 } 5415 cond := v_2.Args[0] 5416 v.reset(OpAMD64CMOVQNE) 5417 v.AddArg3(x, y, cond) 5418 return true 5419 } 5420 // match: (CMOVQNE y _ (FlagEQ)) 5421 // result: y 5422 for { 5423 y := v_0 5424 if v_2.Op != OpAMD64FlagEQ { 5425 break 5426 } 5427 v.copyOf(y) 5428 return true 5429 } 5430 // match: (CMOVQNE _ x (FlagGT_UGT)) 5431 // result: x 5432 for { 5433 x := v_1 5434 if v_2.Op != OpAMD64FlagGT_UGT { 5435 break 5436 } 5437 v.copyOf(x) 5438 return true 5439 } 5440 // match: (CMOVQNE _ x (FlagGT_ULT)) 5441 // result: x 5442 for { 5443 x := v_1 5444 if v_2.Op != OpAMD64FlagGT_ULT { 5445 break 5446 } 5447 v.copyOf(x) 5448 return true 5449 } 5450 // match: (CMOVQNE _ x (FlagLT_ULT)) 5451 // result: x 5452 for { 5453 x := v_1 5454 if v_2.Op != OpAMD64FlagLT_ULT { 5455 break 5456 } 5457 v.copyOf(x) 5458 return true 5459 } 5460 // match: (CMOVQNE _ x (FlagLT_UGT)) 5461 // result: x 5462 for { 5463 x := v_1 5464 if v_2.Op != OpAMD64FlagLT_UGT { 5465 break 5466 } 5467 v.copyOf(x) 5468 return true 5469 } 5470 // match: (CMOVQNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) 5471 // result: (CMOVQNE x y 
(Select1 <types.TypeFlags> blsr)) 5472 for { 5473 x := v_0 5474 y := v_1 5475 if v_2.Op != OpAMD64TESTQ { 5476 break 5477 } 5478 _ = v_2.Args[1] 5479 v_2_0 := v_2.Args[0] 5480 v_2_1 := v_2.Args[1] 5481 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { 5482 s := v_2_0 5483 if s.Op != OpSelect0 { 5484 continue 5485 } 5486 blsr := s.Args[0] 5487 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 { 5488 continue 5489 } 5490 v.reset(OpAMD64CMOVQNE) 5491 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 5492 v0.AddArg(blsr) 5493 v.AddArg3(x, y, v0) 5494 return true 5495 } 5496 break 5497 } 5498 // match: (CMOVQNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s)) 5499 // result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr)) 5500 for { 5501 x := v_0 5502 y := v_1 5503 if v_2.Op != OpAMD64TESTL { 5504 break 5505 } 5506 _ = v_2.Args[1] 5507 v_2_0 := v_2.Args[0] 5508 v_2_1 := v_2.Args[1] 5509 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { 5510 s := v_2_0 5511 if s.Op != OpSelect0 { 5512 continue 5513 } 5514 blsr := s.Args[0] 5515 if blsr.Op != OpAMD64BLSRL || s != v_2_1 { 5516 continue 5517 } 5518 v.reset(OpAMD64CMOVQNE) 5519 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 5520 v0.AddArg(blsr) 5521 v.AddArg3(x, y, v0) 5522 return true 5523 } 5524 break 5525 } 5526 return false 5527 } 5528 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { 5529 v_2 := v.Args[2] 5530 v_1 := v.Args[1] 5531 v_0 := v.Args[0] 5532 // match: (CMOVWCC x y (InvertFlags cond)) 5533 // result: (CMOVWLS x y cond) 5534 for { 5535 x := v_0 5536 y := v_1 5537 if v_2.Op != OpAMD64InvertFlags { 5538 break 5539 } 5540 cond := v_2.Args[0] 5541 v.reset(OpAMD64CMOVWLS) 5542 v.AddArg3(x, y, cond) 5543 return true 5544 } 5545 // match: (CMOVWCC _ x (FlagEQ)) 5546 // result: x 5547 for { 5548 x := v_1 5549 if v_2.Op != OpAMD64FlagEQ { 5550 break 5551 } 5552 v.copyOf(x) 5553 return true 5554 } 5555 // match: (CMOVWCC _ x (FlagGT_UGT)) 5556 // result: x 5557 for { 5558 x := v_1 5559 if v_2.Op != OpAMD64FlagGT_UGT { 5560 break 5561 } 5562 v.copyOf(x) 5563 return true 5564 } 5565 // match: (CMOVWCC y _ (FlagGT_ULT)) 5566 // result: y 5567 for { 5568 y := v_0 5569 if v_2.Op != OpAMD64FlagGT_ULT { 5570 break 5571 } 5572 v.copyOf(y) 5573 return true 5574 } 5575 // match: (CMOVWCC y _ (FlagLT_ULT)) 5576 // result: y 5577 for { 5578 y := v_0 5579 if v_2.Op != OpAMD64FlagLT_ULT { 5580 break 5581 } 5582 v.copyOf(y) 5583 return true 5584 } 5585 // match: (CMOVWCC _ x (FlagLT_UGT)) 5586 // result: x 5587 for { 5588 x := v_1 5589 if v_2.Op != OpAMD64FlagLT_UGT { 5590 break 5591 } 5592 v.copyOf(x) 5593 return true 5594 } 5595 return false 5596 } 5597 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { 5598 v_2 := v.Args[2] 5599 v_1 := v.Args[1] 5600 v_0 := v.Args[0] 5601 // match: (CMOVWCS x y (InvertFlags cond)) 5602 // result: (CMOVWHI x y cond) 5603 for { 5604 x := v_0 5605 y := v_1 5606 if v_2.Op != OpAMD64InvertFlags { 5607 break 5608 } 5609 cond := v_2.Args[0] 5610 v.reset(OpAMD64CMOVWHI) 5611 v.AddArg3(x, y, cond) 5612 return true 5613 } 5614 // match: (CMOVWCS y _ (FlagEQ)) 5615 // result: y 5616 for { 5617 y := v_0 5618 if v_2.Op != OpAMD64FlagEQ { 5619 break 5620 } 5621 v.copyOf(y) 5622 return true 5623 } 5624 // match: (CMOVWCS y _ (FlagGT_UGT)) 5625 // result: y 5626 for { 5627 y := v_0 5628 if v_2.Op != OpAMD64FlagGT_UGT { 5629 break 5630 } 5631 v.copyOf(y) 5632 return true 5633 } 5634 // match: (CMOVWCS _ x (FlagGT_ULT)) 5635 // result: x 5636 for { 5637 x := v_1 5638 if v_2.Op != OpAMD64FlagGT_ULT { 5639 
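// Under InvertFlags the unsigned conditions pair up as CC<->LS and
// CS<->HI, mirroring the signed GE<->LE and GT<->LT swaps.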
break 5640 } 5641 v.copyOf(x) 5642 return true 5643 } 5644 // match: (CMOVWCS _ x (FlagLT_ULT)) 5645 // result: x 5646 for { 5647 x := v_1 5648 if v_2.Op != OpAMD64FlagLT_ULT { 5649 break 5650 } 5651 v.copyOf(x) 5652 return true 5653 } 5654 // match: (CMOVWCS y _ (FlagLT_UGT)) 5655 // result: y 5656 for { 5657 y := v_0 5658 if v_2.Op != OpAMD64FlagLT_UGT { 5659 break 5660 } 5661 v.copyOf(y) 5662 return true 5663 } 5664 return false 5665 } 5666 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { 5667 v_2 := v.Args[2] 5668 v_1 := v.Args[1] 5669 v_0 := v.Args[0] 5670 // match: (CMOVWEQ x y (InvertFlags cond)) 5671 // result: (CMOVWEQ x y cond) 5672 for { 5673 x := v_0 5674 y := v_1 5675 if v_2.Op != OpAMD64InvertFlags { 5676 break 5677 } 5678 cond := v_2.Args[0] 5679 v.reset(OpAMD64CMOVWEQ) 5680 v.AddArg3(x, y, cond) 5681 return true 5682 } 5683 // match: (CMOVWEQ _ x (FlagEQ)) 5684 // result: x 5685 for { 5686 x := v_1 5687 if v_2.Op != OpAMD64FlagEQ { 5688 break 5689 } 5690 v.copyOf(x) 5691 return true 5692 } 5693 // match: (CMOVWEQ y _ (FlagGT_UGT)) 5694 // result: y 5695 for { 5696 y := v_0 5697 if v_2.Op != OpAMD64FlagGT_UGT { 5698 break 5699 } 5700 v.copyOf(y) 5701 return true 5702 } 5703 // match: (CMOVWEQ y _ (FlagGT_ULT)) 5704 // result: y 5705 for { 5706 y := v_0 5707 if v_2.Op != OpAMD64FlagGT_ULT { 5708 break 5709 } 5710 v.copyOf(y) 5711 return true 5712 } 5713 // match: (CMOVWEQ y _ (FlagLT_ULT)) 5714 // result: y 5715 for { 5716 y := v_0 5717 if v_2.Op != OpAMD64FlagLT_ULT { 5718 break 5719 } 5720 v.copyOf(y) 5721 return true 5722 } 5723 // match: (CMOVWEQ y _ (FlagLT_UGT)) 5724 // result: y 5725 for { 5726 y := v_0 5727 if v_2.Op != OpAMD64FlagLT_UGT { 5728 break 5729 } 5730 v.copyOf(y) 5731 return true 5732 } 5733 return false 5734 } 5735 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { 5736 v_2 := v.Args[2] 5737 v_1 := v.Args[1] 5738 v_0 := v.Args[0] 5739 // match: (CMOVWGE x y (InvertFlags cond)) 5740 // result: (CMOVWLE x y cond) 5741 for { 5742 x := v_0 5743 y := v_1 5744 if v_2.Op != OpAMD64InvertFlags { 5745 break 5746 } 5747 cond := v_2.Args[0] 5748 v.reset(OpAMD64CMOVWLE) 5749 v.AddArg3(x, y, cond) 5750 return true 5751 } 5752 // match: (CMOVWGE _ x (FlagEQ)) 5753 // result: x 5754 for { 5755 x := v_1 5756 if v_2.Op != OpAMD64FlagEQ { 5757 break 5758 } 5759 v.copyOf(x) 5760 return true 5761 } 5762 // match: (CMOVWGE _ x (FlagGT_UGT)) 5763 // result: x 5764 for { 5765 x := v_1 5766 if v_2.Op != OpAMD64FlagGT_UGT { 5767 break 5768 } 5769 v.copyOf(x) 5770 return true 5771 } 5772 // match: (CMOVWGE _ x (FlagGT_ULT)) 5773 // result: x 5774 for { 5775 x := v_1 5776 if v_2.Op != OpAMD64FlagGT_ULT { 5777 break 5778 } 5779 v.copyOf(x) 5780 return true 5781 } 5782 // match: (CMOVWGE y _ (FlagLT_ULT)) 5783 // result: y 5784 for { 5785 y := v_0 5786 if v_2.Op != OpAMD64FlagLT_ULT { 5787 break 5788 } 5789 v.copyOf(y) 5790 return true 5791 } 5792 // match: (CMOVWGE y _ (FlagLT_UGT)) 5793 // result: y 5794 for { 5795 y := v_0 5796 if v_2.Op != OpAMD64FlagLT_UGT { 5797 break 5798 } 5799 v.copyOf(y) 5800 return true 5801 } 5802 return false 5803 } 5804 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { 5805 v_2 := v.Args[2] 5806 v_1 := v.Args[1] 5807 v_0 := v.Args[0] 5808 // match: (CMOVWGT x y (InvertFlags cond)) 5809 // result: (CMOVWLT x y cond) 5810 for { 5811 x := v_0 5812 y := v_1 5813 if v_2.Op != OpAMD64InvertFlags { 5814 break 5815 } 5816 cond := v_2.Args[0] 5817 v.reset(OpAMD64CMOVWLT) 5818 v.AddArg3(x, y, cond) 5819 return true 5820 } 5821 // match: (CMOVWGT y _ 
(FlagEQ)) 5822 // result: y 5823 for { 5824 y := v_0 5825 if v_2.Op != OpAMD64FlagEQ { 5826 break 5827 } 5828 v.copyOf(y) 5829 return true 5830 } 5831 // match: (CMOVWGT _ x (FlagGT_UGT)) 5832 // result: x 5833 for { 5834 x := v_1 5835 if v_2.Op != OpAMD64FlagGT_UGT { 5836 break 5837 } 5838 v.copyOf(x) 5839 return true 5840 } 5841 // match: (CMOVWGT _ x (FlagGT_ULT)) 5842 // result: x 5843 for { 5844 x := v_1 5845 if v_2.Op != OpAMD64FlagGT_ULT { 5846 break 5847 } 5848 v.copyOf(x) 5849 return true 5850 } 5851 // match: (CMOVWGT y _ (FlagLT_ULT)) 5852 // result: y 5853 for { 5854 y := v_0 5855 if v_2.Op != OpAMD64FlagLT_ULT { 5856 break 5857 } 5858 v.copyOf(y) 5859 return true 5860 } 5861 // match: (CMOVWGT y _ (FlagLT_UGT)) 5862 // result: y 5863 for { 5864 y := v_0 5865 if v_2.Op != OpAMD64FlagLT_UGT { 5866 break 5867 } 5868 v.copyOf(y) 5869 return true 5870 } 5871 return false 5872 } 5873 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { 5874 v_2 := v.Args[2] 5875 v_1 := v.Args[1] 5876 v_0 := v.Args[0] 5877 // match: (CMOVWHI x y (InvertFlags cond)) 5878 // result: (CMOVWCS x y cond) 5879 for { 5880 x := v_0 5881 y := v_1 5882 if v_2.Op != OpAMD64InvertFlags { 5883 break 5884 } 5885 cond := v_2.Args[0] 5886 v.reset(OpAMD64CMOVWCS) 5887 v.AddArg3(x, y, cond) 5888 return true 5889 } 5890 // match: (CMOVWHI y _ (FlagEQ)) 5891 // result: y 5892 for { 5893 y := v_0 5894 if v_2.Op != OpAMD64FlagEQ { 5895 break 5896 } 5897 v.copyOf(y) 5898 return true 5899 } 5900 // match: (CMOVWHI _ x (FlagGT_UGT)) 5901 // result: x 5902 for { 5903 x := v_1 5904 if v_2.Op != OpAMD64FlagGT_UGT { 5905 break 5906 } 5907 v.copyOf(x) 5908 return true 5909 } 5910 // match: (CMOVWHI y _ (FlagGT_ULT)) 5911 // result: y 5912 for { 5913 y := v_0 5914 if v_2.Op != OpAMD64FlagGT_ULT { 5915 break 5916 } 5917 v.copyOf(y) 5918 return true 5919 } 5920 // match: (CMOVWHI y _ (FlagLT_ULT)) 5921 // result: y 5922 for { 5923 y := v_0 5924 if v_2.Op != OpAMD64FlagLT_ULT { 5925 break 5926 } 5927 v.copyOf(y) 5928 return true 5929 } 5930 // match: (CMOVWHI _ x (FlagLT_UGT)) 5931 // result: x 5932 for { 5933 x := v_1 5934 if v_2.Op != OpAMD64FlagLT_UGT { 5935 break 5936 } 5937 v.copyOf(x) 5938 return true 5939 } 5940 return false 5941 } 5942 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { 5943 v_2 := v.Args[2] 5944 v_1 := v.Args[1] 5945 v_0 := v.Args[0] 5946 // match: (CMOVWLE x y (InvertFlags cond)) 5947 // result: (CMOVWGE x y cond) 5948 for { 5949 x := v_0 5950 y := v_1 5951 if v_2.Op != OpAMD64InvertFlags { 5952 break 5953 } 5954 cond := v_2.Args[0] 5955 v.reset(OpAMD64CMOVWGE) 5956 v.AddArg3(x, y, cond) 5957 return true 5958 } 5959 // match: (CMOVWLE _ x (FlagEQ)) 5960 // result: x 5961 for { 5962 x := v_1 5963 if v_2.Op != OpAMD64FlagEQ { 5964 break 5965 } 5966 v.copyOf(x) 5967 return true 5968 } 5969 // match: (CMOVWLE y _ (FlagGT_UGT)) 5970 // result: y 5971 for { 5972 y := v_0 5973 if v_2.Op != OpAMD64FlagGT_UGT { 5974 break 5975 } 5976 v.copyOf(y) 5977 return true 5978 } 5979 // match: (CMOVWLE y _ (FlagGT_ULT)) 5980 // result: y 5981 for { 5982 y := v_0 5983 if v_2.Op != OpAMD64FlagGT_ULT { 5984 break 5985 } 5986 v.copyOf(y) 5987 return true 5988 } 5989 // match: (CMOVWLE _ x (FlagLT_ULT)) 5990 // result: x 5991 for { 5992 x := v_1 5993 if v_2.Op != OpAMD64FlagLT_ULT { 5994 break 5995 } 5996 v.copyOf(x) 5997 return true 5998 } 5999 // match: (CMOVWLE _ x (FlagLT_UGT)) 6000 // result: x 6001 for { 6002 x := v_1 6003 if v_2.Op != OpAMD64FlagLT_UGT { 6004 break 6005 } 6006 v.copyOf(x) 6007 return true 6008 } 6009 
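// Returning false reports that no rule fired for this value; the
// rewrite pass keeps re-running these functions over the whole
// function body until it reaches a fixed point.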
return false 6010 } 6011 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { 6012 v_2 := v.Args[2] 6013 v_1 := v.Args[1] 6014 v_0 := v.Args[0] 6015 // match: (CMOVWLS x y (InvertFlags cond)) 6016 // result: (CMOVWCC x y cond) 6017 for { 6018 x := v_0 6019 y := v_1 6020 if v_2.Op != OpAMD64InvertFlags { 6021 break 6022 } 6023 cond := v_2.Args[0] 6024 v.reset(OpAMD64CMOVWCC) 6025 v.AddArg3(x, y, cond) 6026 return true 6027 } 6028 // match: (CMOVWLS _ x (FlagEQ)) 6029 // result: x 6030 for { 6031 x := v_1 6032 if v_2.Op != OpAMD64FlagEQ { 6033 break 6034 } 6035 v.copyOf(x) 6036 return true 6037 } 6038 // match: (CMOVWLS y _ (FlagGT_UGT)) 6039 // result: y 6040 for { 6041 y := v_0 6042 if v_2.Op != OpAMD64FlagGT_UGT { 6043 break 6044 } 6045 v.copyOf(y) 6046 return true 6047 } 6048 // match: (CMOVWLS _ x (FlagGT_ULT)) 6049 // result: x 6050 for { 6051 x := v_1 6052 if v_2.Op != OpAMD64FlagGT_ULT { 6053 break 6054 } 6055 v.copyOf(x) 6056 return true 6057 } 6058 // match: (CMOVWLS _ x (FlagLT_ULT)) 6059 // result: x 6060 for { 6061 x := v_1 6062 if v_2.Op != OpAMD64FlagLT_ULT { 6063 break 6064 } 6065 v.copyOf(x) 6066 return true 6067 } 6068 // match: (CMOVWLS y _ (FlagLT_UGT)) 6069 // result: y 6070 for { 6071 y := v_0 6072 if v_2.Op != OpAMD64FlagLT_UGT { 6073 break 6074 } 6075 v.copyOf(y) 6076 return true 6077 } 6078 return false 6079 } 6080 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { 6081 v_2 := v.Args[2] 6082 v_1 := v.Args[1] 6083 v_0 := v.Args[0] 6084 // match: (CMOVWLT x y (InvertFlags cond)) 6085 // result: (CMOVWGT x y cond) 6086 for { 6087 x := v_0 6088 y := v_1 6089 if v_2.Op != OpAMD64InvertFlags { 6090 break 6091 } 6092 cond := v_2.Args[0] 6093 v.reset(OpAMD64CMOVWGT) 6094 v.AddArg3(x, y, cond) 6095 return true 6096 } 6097 // match: (CMOVWLT y _ (FlagEQ)) 6098 // result: y 6099 for { 6100 y := v_0 6101 if v_2.Op != OpAMD64FlagEQ { 6102 break 6103 } 6104 v.copyOf(y) 6105 return true 6106 } 6107 // match: (CMOVWLT y _ (FlagGT_UGT)) 6108 // result: y 6109 for { 6110 y := v_0 6111 if v_2.Op != OpAMD64FlagGT_UGT { 6112 break 6113 } 6114 v.copyOf(y) 6115 return true 6116 } 6117 // match: (CMOVWLT y _ (FlagGT_ULT)) 6118 // result: y 6119 for { 6120 y := v_0 6121 if v_2.Op != OpAMD64FlagGT_ULT { 6122 break 6123 } 6124 v.copyOf(y) 6125 return true 6126 } 6127 // match: (CMOVWLT _ x (FlagLT_ULT)) 6128 // result: x 6129 for { 6130 x := v_1 6131 if v_2.Op != OpAMD64FlagLT_ULT { 6132 break 6133 } 6134 v.copyOf(x) 6135 return true 6136 } 6137 // match: (CMOVWLT _ x (FlagLT_UGT)) 6138 // result: x 6139 for { 6140 x := v_1 6141 if v_2.Op != OpAMD64FlagLT_UGT { 6142 break 6143 } 6144 v.copyOf(x) 6145 return true 6146 } 6147 return false 6148 } 6149 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { 6150 v_2 := v.Args[2] 6151 v_1 := v.Args[1] 6152 v_0 := v.Args[0] 6153 // match: (CMOVWNE x y (InvertFlags cond)) 6154 // result: (CMOVWNE x y cond) 6155 for { 6156 x := v_0 6157 y := v_1 6158 if v_2.Op != OpAMD64InvertFlags { 6159 break 6160 } 6161 cond := v_2.Args[0] 6162 v.reset(OpAMD64CMOVWNE) 6163 v.AddArg3(x, y, cond) 6164 return true 6165 } 6166 // match: (CMOVWNE y _ (FlagEQ)) 6167 // result: y 6168 for { 6169 y := v_0 6170 if v_2.Op != OpAMD64FlagEQ { 6171 break 6172 } 6173 v.copyOf(y) 6174 return true 6175 } 6176 // match: (CMOVWNE _ x (FlagGT_UGT)) 6177 // result: x 6178 for { 6179 x := v_1 6180 if v_2.Op != OpAMD64FlagGT_UGT { 6181 break 6182 } 6183 v.copyOf(x) 6184 return true 6185 } 6186 // match: (CMOVWNE _ x (FlagGT_ULT)) 6187 // result: x 6188 for { 6189 x := v_1 6190 if 
v_2.Op != OpAMD64FlagGT_ULT { 6191 break 6192 } 6193 v.copyOf(x) 6194 return true 6195 } 6196 // match: (CMOVWNE _ x (FlagLT_ULT)) 6197 // result: x 6198 for { 6199 x := v_1 6200 if v_2.Op != OpAMD64FlagLT_ULT { 6201 break 6202 } 6203 v.copyOf(x) 6204 return true 6205 } 6206 // match: (CMOVWNE _ x (FlagLT_UGT)) 6207 // result: x 6208 for { 6209 x := v_1 6210 if v_2.Op != OpAMD64FlagLT_UGT { 6211 break 6212 } 6213 v.copyOf(x) 6214 return true 6215 } 6216 return false 6217 } 6218 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { 6219 v_1 := v.Args[1] 6220 v_0 := v.Args[0] 6221 b := v.Block 6222 // match: (CMPB x (MOVLconst [c])) 6223 // result: (CMPBconst x [int8(c)]) 6224 for { 6225 x := v_0 6226 if v_1.Op != OpAMD64MOVLconst { 6227 break 6228 } 6229 c := auxIntToInt32(v_1.AuxInt) 6230 v.reset(OpAMD64CMPBconst) 6231 v.AuxInt = int8ToAuxInt(int8(c)) 6232 v.AddArg(x) 6233 return true 6234 } 6235 // match: (CMPB (MOVLconst [c]) x) 6236 // result: (InvertFlags (CMPBconst x [int8(c)])) 6237 for { 6238 if v_0.Op != OpAMD64MOVLconst { 6239 break 6240 } 6241 c := auxIntToInt32(v_0.AuxInt) 6242 x := v_1 6243 v.reset(OpAMD64InvertFlags) 6244 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 6245 v0.AuxInt = int8ToAuxInt(int8(c)) 6246 v0.AddArg(x) 6247 v.AddArg(v0) 6248 return true 6249 } 6250 // match: (CMPB x y) 6251 // cond: canonLessThan(x,y) 6252 // result: (InvertFlags (CMPB y x)) 6253 for { 6254 x := v_0 6255 y := v_1 6256 if !(canonLessThan(x, y)) { 6257 break 6258 } 6259 v.reset(OpAMD64InvertFlags) 6260 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 6261 v0.AddArg2(y, x) 6262 v.AddArg(v0) 6263 return true 6264 } 6265 // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x) 6266 // cond: canMergeLoad(v, l) && clobber(l) 6267 // result: (CMPBload {sym} [off] ptr x mem) 6268 for { 6269 l := v_0 6270 if l.Op != OpAMD64MOVBload { 6271 break 6272 } 6273 off := auxIntToInt32(l.AuxInt) 6274 sym := auxToSym(l.Aux) 6275 mem := l.Args[1] 6276 ptr := l.Args[0] 6277 x := v_1 6278 if !(canMergeLoad(v, l) && clobber(l)) { 6279 break 6280 } 6281 v.reset(OpAMD64CMPBload) 6282 v.AuxInt = int32ToAuxInt(off) 6283 v.Aux = symToAux(sym) 6284 v.AddArg3(ptr, x, mem) 6285 return true 6286 } 6287 // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) 6288 // cond: canMergeLoad(v, l) && clobber(l) 6289 // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) 6290 for { 6291 x := v_0 6292 l := v_1 6293 if l.Op != OpAMD64MOVBload { 6294 break 6295 } 6296 off := auxIntToInt32(l.AuxInt) 6297 sym := auxToSym(l.Aux) 6298 mem := l.Args[1] 6299 ptr := l.Args[0] 6300 if !(canMergeLoad(v, l) && clobber(l)) { 6301 break 6302 } 6303 v.reset(OpAMD64InvertFlags) 6304 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags) 6305 v0.AuxInt = int32ToAuxInt(off) 6306 v0.Aux = symToAux(sym) 6307 v0.AddArg3(ptr, x, mem) 6308 v.AddArg(v0) 6309 return true 6310 } 6311 return false 6312 } 6313 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { 6314 v_0 := v.Args[0] 6315 b := v.Block 6316 // match: (CMPBconst (MOVLconst [x]) [y]) 6317 // cond: int8(x)==y 6318 // result: (FlagEQ) 6319 for { 6320 y := auxIntToInt8(v.AuxInt) 6321 if v_0.Op != OpAMD64MOVLconst { 6322 break 6323 } 6324 x := auxIntToInt32(v_0.AuxInt) 6325 if !(int8(x) == y) { 6326 break 6327 } 6328 v.reset(OpAMD64FlagEQ) 6329 return true 6330 } 6331 // match: (CMPBconst (MOVLconst [x]) [y]) 6332 // cond: int8(x)<y && uint8(x)<uint8(y) 6333 // result: (FlagLT_ULT) 6334 for { 6335 y := auxIntToInt8(v.AuxInt) 6336 if v_0.Op != OpAMD64MOVLconst { 6337 break 6338 } 
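// When both sides are constants the compare folds away completely:
// these cases materialize the Flag* value matching every signed and
// unsigned ordering of int8(x) against y.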
6339 x := auxIntToInt32(v_0.AuxInt) 6340 if !(int8(x) < y && uint8(x) < uint8(y)) { 6341 break 6342 } 6343 v.reset(OpAMD64FlagLT_ULT) 6344 return true 6345 } 6346 // match: (CMPBconst (MOVLconst [x]) [y]) 6347 // cond: int8(x)<y && uint8(x)>uint8(y) 6348 // result: (FlagLT_UGT) 6349 for { 6350 y := auxIntToInt8(v.AuxInt) 6351 if v_0.Op != OpAMD64MOVLconst { 6352 break 6353 } 6354 x := auxIntToInt32(v_0.AuxInt) 6355 if !(int8(x) < y && uint8(x) > uint8(y)) { 6356 break 6357 } 6358 v.reset(OpAMD64FlagLT_UGT) 6359 return true 6360 } 6361 // match: (CMPBconst (MOVLconst [x]) [y]) 6362 // cond: int8(x)>y && uint8(x)<uint8(y) 6363 // result: (FlagGT_ULT) 6364 for { 6365 y := auxIntToInt8(v.AuxInt) 6366 if v_0.Op != OpAMD64MOVLconst { 6367 break 6368 } 6369 x := auxIntToInt32(v_0.AuxInt) 6370 if !(int8(x) > y && uint8(x) < uint8(y)) { 6371 break 6372 } 6373 v.reset(OpAMD64FlagGT_ULT) 6374 return true 6375 } 6376 // match: (CMPBconst (MOVLconst [x]) [y]) 6377 // cond: int8(x)>y && uint8(x)>uint8(y) 6378 // result: (FlagGT_UGT) 6379 for { 6380 y := auxIntToInt8(v.AuxInt) 6381 if v_0.Op != OpAMD64MOVLconst { 6382 break 6383 } 6384 x := auxIntToInt32(v_0.AuxInt) 6385 if !(int8(x) > y && uint8(x) > uint8(y)) { 6386 break 6387 } 6388 v.reset(OpAMD64FlagGT_UGT) 6389 return true 6390 } 6391 // match: (CMPBconst (ANDLconst _ [m]) [n]) 6392 // cond: 0 <= int8(m) && int8(m) < n 6393 // result: (FlagLT_ULT) 6394 for { 6395 n := auxIntToInt8(v.AuxInt) 6396 if v_0.Op != OpAMD64ANDLconst { 6397 break 6398 } 6399 m := auxIntToInt32(v_0.AuxInt) 6400 if !(0 <= int8(m) && int8(m) < n) { 6401 break 6402 } 6403 v.reset(OpAMD64FlagLT_ULT) 6404 return true 6405 } 6406 // match: (CMPBconst a:(ANDL x y) [0]) 6407 // cond: a.Uses == 1 6408 // result: (TESTB x y) 6409 for { 6410 if auxIntToInt8(v.AuxInt) != 0 { 6411 break 6412 } 6413 a := v_0 6414 if a.Op != OpAMD64ANDL { 6415 break 6416 } 6417 y := a.Args[1] 6418 x := a.Args[0] 6419 if !(a.Uses == 1) { 6420 break 6421 } 6422 v.reset(OpAMD64TESTB) 6423 v.AddArg2(x, y) 6424 return true 6425 } 6426 // match: (CMPBconst a:(ANDLconst [c] x) [0]) 6427 // cond: a.Uses == 1 6428 // result: (TESTBconst [int8(c)] x) 6429 for { 6430 if auxIntToInt8(v.AuxInt) != 0 { 6431 break 6432 } 6433 a := v_0 6434 if a.Op != OpAMD64ANDLconst { 6435 break 6436 } 6437 c := auxIntToInt32(a.AuxInt) 6438 x := a.Args[0] 6439 if !(a.Uses == 1) { 6440 break 6441 } 6442 v.reset(OpAMD64TESTBconst) 6443 v.AuxInt = int8ToAuxInt(int8(c)) 6444 v.AddArg(x) 6445 return true 6446 } 6447 // match: (CMPBconst x [0]) 6448 // result: (TESTB x x) 6449 for { 6450 if auxIntToInt8(v.AuxInt) != 0 { 6451 break 6452 } 6453 x := v_0 6454 v.reset(OpAMD64TESTB) 6455 v.AddArg2(x, x) 6456 return true 6457 } 6458 // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) 6459 // cond: l.Uses == 1 && clobber(l) 6460 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) 6461 for { 6462 c := auxIntToInt8(v.AuxInt) 6463 l := v_0 6464 if l.Op != OpAMD64MOVBload { 6465 break 6466 } 6467 off := auxIntToInt32(l.AuxInt) 6468 sym := auxToSym(l.Aux) 6469 mem := l.Args[1] 6470 ptr := l.Args[0] 6471 if !(l.Uses == 1 && clobber(l)) { 6472 break 6473 } 6474 b = l.Block 6475 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) 6476 v.copyOf(v0) 6477 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 6478 v0.Aux = symToAux(sym) 6479 v0.AddArg2(ptr, mem) 6480 return true 6481 } 6482 return false 6483 } 6484 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool { 6485 v_1 := v.Args[1] 6486 v_0 := 
v.Args[0] 6487 // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 6488 // cond: ValAndOff(valoff1).canAdd32(off2) 6489 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) 6490 for { 6491 valoff1 := auxIntToValAndOff(v.AuxInt) 6492 sym := auxToSym(v.Aux) 6493 if v_0.Op != OpAMD64ADDQconst { 6494 break 6495 } 6496 off2 := auxIntToInt32(v_0.AuxInt) 6497 base := v_0.Args[0] 6498 mem := v_1 6499 if !(ValAndOff(valoff1).canAdd32(off2)) { 6500 break 6501 } 6502 v.reset(OpAMD64CMPBconstload) 6503 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 6504 v.Aux = symToAux(sym) 6505 v.AddArg2(base, mem) 6506 return true 6507 } 6508 // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 6509 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) 6510 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) 6511 for { 6512 valoff1 := auxIntToValAndOff(v.AuxInt) 6513 sym1 := auxToSym(v.Aux) 6514 if v_0.Op != OpAMD64LEAQ { 6515 break 6516 } 6517 off2 := auxIntToInt32(v_0.AuxInt) 6518 sym2 := auxToSym(v_0.Aux) 6519 base := v_0.Args[0] 6520 mem := v_1 6521 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { 6522 break 6523 } 6524 v.reset(OpAMD64CMPBconstload) 6525 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 6526 v.Aux = symToAux(mergeSym(sym1, sym2)) 6527 v.AddArg2(base, mem) 6528 return true 6529 } 6530 return false 6531 } 6532 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { 6533 v_2 := v.Args[2] 6534 v_1 := v.Args[1] 6535 v_0 := v.Args[0] 6536 // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem) 6537 // cond: is32Bit(int64(off1)+int64(off2)) 6538 // result: (CMPBload [off1+off2] {sym} base val mem) 6539 for { 6540 off1 := auxIntToInt32(v.AuxInt) 6541 sym := auxToSym(v.Aux) 6542 if v_0.Op != OpAMD64ADDQconst { 6543 break 6544 } 6545 off2 := auxIntToInt32(v_0.AuxInt) 6546 base := v_0.Args[0] 6547 val := v_1 6548 mem := v_2 6549 if !(is32Bit(int64(off1) + int64(off2))) { 6550 break 6551 } 6552 v.reset(OpAMD64CMPBload) 6553 v.AuxInt = int32ToAuxInt(off1 + off2) 6554 v.Aux = symToAux(sym) 6555 v.AddArg3(base, val, mem) 6556 return true 6557 } 6558 // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6559 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 6560 // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6561 for { 6562 off1 := auxIntToInt32(v.AuxInt) 6563 sym1 := auxToSym(v.Aux) 6564 if v_0.Op != OpAMD64LEAQ { 6565 break 6566 } 6567 off2 := auxIntToInt32(v_0.AuxInt) 6568 sym2 := auxToSym(v_0.Aux) 6569 base := v_0.Args[0] 6570 val := v_1 6571 mem := v_2 6572 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 6573 break 6574 } 6575 v.reset(OpAMD64CMPBload) 6576 v.AuxInt = int32ToAuxInt(off1 + off2) 6577 v.Aux = symToAux(mergeSym(sym1, sym2)) 6578 v.AddArg3(base, val, mem) 6579 return true 6580 } 6581 // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) 6582 // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) 6583 for { 6584 off := auxIntToInt32(v.AuxInt) 6585 sym := auxToSym(v.Aux) 6586 ptr := v_0 6587 if v_1.Op != OpAMD64MOVLconst { 6588 break 6589 } 6590 c := auxIntToInt32(v_1.AuxInt) 6591 mem := v_2 6592 v.reset(OpAMD64CMPBconstload) 6593 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) 6594 v.Aux = symToAux(sym) 6595 v.AddArg2(ptr, mem) 6596 return true 6597 } 6598 return false 6599 } 6600 func 
rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { 6601 v_1 := v.Args[1] 6602 v_0 := v.Args[0] 6603 b := v.Block 6604 // match: (CMPL x (MOVLconst [c])) 6605 // result: (CMPLconst x [c]) 6606 for { 6607 x := v_0 6608 if v_1.Op != OpAMD64MOVLconst { 6609 break 6610 } 6611 c := auxIntToInt32(v_1.AuxInt) 6612 v.reset(OpAMD64CMPLconst) 6613 v.AuxInt = int32ToAuxInt(c) 6614 v.AddArg(x) 6615 return true 6616 } 6617 // match: (CMPL (MOVLconst [c]) x) 6618 // result: (InvertFlags (CMPLconst x [c])) 6619 for { 6620 if v_0.Op != OpAMD64MOVLconst { 6621 break 6622 } 6623 c := auxIntToInt32(v_0.AuxInt) 6624 x := v_1 6625 v.reset(OpAMD64InvertFlags) 6626 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 6627 v0.AuxInt = int32ToAuxInt(c) 6628 v0.AddArg(x) 6629 v.AddArg(v0) 6630 return true 6631 } 6632 // match: (CMPL x y) 6633 // cond: canonLessThan(x,y) 6634 // result: (InvertFlags (CMPL y x)) 6635 for { 6636 x := v_0 6637 y := v_1 6638 if !(canonLessThan(x, y)) { 6639 break 6640 } 6641 v.reset(OpAMD64InvertFlags) 6642 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 6643 v0.AddArg2(y, x) 6644 v.AddArg(v0) 6645 return true 6646 } 6647 // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x) 6648 // cond: canMergeLoad(v, l) && clobber(l) 6649 // result: (CMPLload {sym} [off] ptr x mem) 6650 for { 6651 l := v_0 6652 if l.Op != OpAMD64MOVLload { 6653 break 6654 } 6655 off := auxIntToInt32(l.AuxInt) 6656 sym := auxToSym(l.Aux) 6657 mem := l.Args[1] 6658 ptr := l.Args[0] 6659 x := v_1 6660 if !(canMergeLoad(v, l) && clobber(l)) { 6661 break 6662 } 6663 v.reset(OpAMD64CMPLload) 6664 v.AuxInt = int32ToAuxInt(off) 6665 v.Aux = symToAux(sym) 6666 v.AddArg3(ptr, x, mem) 6667 return true 6668 } 6669 // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) 6670 // cond: canMergeLoad(v, l) && clobber(l) 6671 // result: (InvertFlags (CMPLload {sym} [off] ptr x mem)) 6672 for { 6673 x := v_0 6674 l := v_1 6675 if l.Op != OpAMD64MOVLload { 6676 break 6677 } 6678 off := auxIntToInt32(l.AuxInt) 6679 sym := auxToSym(l.Aux) 6680 mem := l.Args[1] 6681 ptr := l.Args[0] 6682 if !(canMergeLoad(v, l) && clobber(l)) { 6683 break 6684 } 6685 v.reset(OpAMD64InvertFlags) 6686 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags) 6687 v0.AuxInt = int32ToAuxInt(off) 6688 v0.Aux = symToAux(sym) 6689 v0.AddArg3(ptr, x, mem) 6690 v.AddArg(v0) 6691 return true 6692 } 6693 return false 6694 } 6695 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { 6696 v_0 := v.Args[0] 6697 b := v.Block 6698 // match: (CMPLconst (MOVLconst [x]) [y]) 6699 // cond: x==y 6700 // result: (FlagEQ) 6701 for { 6702 y := auxIntToInt32(v.AuxInt) 6703 if v_0.Op != OpAMD64MOVLconst { 6704 break 6705 } 6706 x := auxIntToInt32(v_0.AuxInt) 6707 if !(x == y) { 6708 break 6709 } 6710 v.reset(OpAMD64FlagEQ) 6711 return true 6712 } 6713 // match: (CMPLconst (MOVLconst [x]) [y]) 6714 // cond: x<y && uint32(x)<uint32(y) 6715 // result: (FlagLT_ULT) 6716 for { 6717 y := auxIntToInt32(v.AuxInt) 6718 if v_0.Op != OpAMD64MOVLconst { 6719 break 6720 } 6721 x := auxIntToInt32(v_0.AuxInt) 6722 if !(x < y && uint32(x) < uint32(y)) { 6723 break 6724 } 6725 v.reset(OpAMD64FlagLT_ULT) 6726 return true 6727 } 6728 // match: (CMPLconst (MOVLconst [x]) [y]) 6729 // cond: x<y && uint32(x)>uint32(y) 6730 // result: (FlagLT_UGT) 6731 for { 6732 y := auxIntToInt32(v.AuxInt) 6733 if v_0.Op != OpAMD64MOVLconst { 6734 break 6735 } 6736 x := auxIntToInt32(v_0.AuxInt) 6737 if !(x < y && uint32(x) > uint32(y)) { 6738 break 6739 } 6740 v.reset(OpAMD64FlagLT_UGT) 6741 return true 6742 } 
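// The SHRLconst rule below is a range argument: a 32-bit value shifted
// right by c is at most 1<<(32-c) - 1, so whenever 1<<(32-c) <= n the
// comparison against n is a known unsigned less-than (FlagLT_ULT).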
6743 // match: (CMPLconst (MOVLconst [x]) [y]) 6744 // cond: x>y && uint32(x)<uint32(y) 6745 // result: (FlagGT_ULT) 6746 for { 6747 y := auxIntToInt32(v.AuxInt) 6748 if v_0.Op != OpAMD64MOVLconst { 6749 break 6750 } 6751 x := auxIntToInt32(v_0.AuxInt) 6752 if !(x > y && uint32(x) < uint32(y)) { 6753 break 6754 } 6755 v.reset(OpAMD64FlagGT_ULT) 6756 return true 6757 } 6758 // match: (CMPLconst (MOVLconst [x]) [y]) 6759 // cond: x>y && uint32(x)>uint32(y) 6760 // result: (FlagGT_UGT) 6761 for { 6762 y := auxIntToInt32(v.AuxInt) 6763 if v_0.Op != OpAMD64MOVLconst { 6764 break 6765 } 6766 x := auxIntToInt32(v_0.AuxInt) 6767 if !(x > y && uint32(x) > uint32(y)) { 6768 break 6769 } 6770 v.reset(OpAMD64FlagGT_UGT) 6771 return true 6772 } 6773 // match: (CMPLconst (SHRLconst _ [c]) [n]) 6774 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 6775 // result: (FlagLT_ULT) 6776 for { 6777 n := auxIntToInt32(v.AuxInt) 6778 if v_0.Op != OpAMD64SHRLconst { 6779 break 6780 } 6781 c := auxIntToInt8(v_0.AuxInt) 6782 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 6783 break 6784 } 6785 v.reset(OpAMD64FlagLT_ULT) 6786 return true 6787 } 6788 // match: (CMPLconst (ANDLconst _ [m]) [n]) 6789 // cond: 0 <= m && m < n 6790 // result: (FlagLT_ULT) 6791 for { 6792 n := auxIntToInt32(v.AuxInt) 6793 if v_0.Op != OpAMD64ANDLconst { 6794 break 6795 } 6796 m := auxIntToInt32(v_0.AuxInt) 6797 if !(0 <= m && m < n) { 6798 break 6799 } 6800 v.reset(OpAMD64FlagLT_ULT) 6801 return true 6802 } 6803 // match: (CMPLconst a:(ANDL x y) [0]) 6804 // cond: a.Uses == 1 6805 // result: (TESTL x y) 6806 for { 6807 if auxIntToInt32(v.AuxInt) != 0 { 6808 break 6809 } 6810 a := v_0 6811 if a.Op != OpAMD64ANDL { 6812 break 6813 } 6814 y := a.Args[1] 6815 x := a.Args[0] 6816 if !(a.Uses == 1) { 6817 break 6818 } 6819 v.reset(OpAMD64TESTL) 6820 v.AddArg2(x, y) 6821 return true 6822 } 6823 // match: (CMPLconst a:(ANDLconst [c] x) [0]) 6824 // cond: a.Uses == 1 6825 // result: (TESTLconst [c] x) 6826 for { 6827 if auxIntToInt32(v.AuxInt) != 0 { 6828 break 6829 } 6830 a := v_0 6831 if a.Op != OpAMD64ANDLconst { 6832 break 6833 } 6834 c := auxIntToInt32(a.AuxInt) 6835 x := a.Args[0] 6836 if !(a.Uses == 1) { 6837 break 6838 } 6839 v.reset(OpAMD64TESTLconst) 6840 v.AuxInt = int32ToAuxInt(c) 6841 v.AddArg(x) 6842 return true 6843 } 6844 // match: (CMPLconst x [0]) 6845 // result: (TESTL x x) 6846 for { 6847 if auxIntToInt32(v.AuxInt) != 0 { 6848 break 6849 } 6850 x := v_0 6851 v.reset(OpAMD64TESTL) 6852 v.AddArg2(x, x) 6853 return true 6854 } 6855 // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c]) 6856 // cond: l.Uses == 1 && clobber(l) 6857 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) 6858 for { 6859 c := auxIntToInt32(v.AuxInt) 6860 l := v_0 6861 if l.Op != OpAMD64MOVLload { 6862 break 6863 } 6864 off := auxIntToInt32(l.AuxInt) 6865 sym := auxToSym(l.Aux) 6866 mem := l.Args[1] 6867 ptr := l.Args[0] 6868 if !(l.Uses == 1 && clobber(l)) { 6869 break 6870 } 6871 b = l.Block 6872 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) 6873 v.copyOf(v0) 6874 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) 6875 v0.Aux = symToAux(sym) 6876 v0.AddArg2(ptr, mem) 6877 return true 6878 } 6879 return false 6880 } 6881 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool { 6882 v_1 := v.Args[1] 6883 v_0 := v.Args[0] 6884 // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 6885 // cond: ValAndOff(valoff1).canAdd32(off2) 6886 // result: 
(CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) 6887 for { 6888 valoff1 := auxIntToValAndOff(v.AuxInt) 6889 sym := auxToSym(v.Aux) 6890 if v_0.Op != OpAMD64ADDQconst { 6891 break 6892 } 6893 off2 := auxIntToInt32(v_0.AuxInt) 6894 base := v_0.Args[0] 6895 mem := v_1 6896 if !(ValAndOff(valoff1).canAdd32(off2)) { 6897 break 6898 } 6899 v.reset(OpAMD64CMPLconstload) 6900 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 6901 v.Aux = symToAux(sym) 6902 v.AddArg2(base, mem) 6903 return true 6904 } 6905 // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 6906 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) 6907 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) 6908 for { 6909 valoff1 := auxIntToValAndOff(v.AuxInt) 6910 sym1 := auxToSym(v.Aux) 6911 if v_0.Op != OpAMD64LEAQ { 6912 break 6913 } 6914 off2 := auxIntToInt32(v_0.AuxInt) 6915 sym2 := auxToSym(v_0.Aux) 6916 base := v_0.Args[0] 6917 mem := v_1 6918 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { 6919 break 6920 } 6921 v.reset(OpAMD64CMPLconstload) 6922 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 6923 v.Aux = symToAux(mergeSym(sym1, sym2)) 6924 v.AddArg2(base, mem) 6925 return true 6926 } 6927 return false 6928 } 6929 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { 6930 v_2 := v.Args[2] 6931 v_1 := v.Args[1] 6932 v_0 := v.Args[0] 6933 // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem) 6934 // cond: is32Bit(int64(off1)+int64(off2)) 6935 // result: (CMPLload [off1+off2] {sym} base val mem) 6936 for { 6937 off1 := auxIntToInt32(v.AuxInt) 6938 sym := auxToSym(v.Aux) 6939 if v_0.Op != OpAMD64ADDQconst { 6940 break 6941 } 6942 off2 := auxIntToInt32(v_0.AuxInt) 6943 base := v_0.Args[0] 6944 val := v_1 6945 mem := v_2 6946 if !(is32Bit(int64(off1) + int64(off2))) { 6947 break 6948 } 6949 v.reset(OpAMD64CMPLload) 6950 v.AuxInt = int32ToAuxInt(off1 + off2) 6951 v.Aux = symToAux(sym) 6952 v.AddArg3(base, val, mem) 6953 return true 6954 } 6955 // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 6956 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 6957 // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6958 for { 6959 off1 := auxIntToInt32(v.AuxInt) 6960 sym1 := auxToSym(v.Aux) 6961 if v_0.Op != OpAMD64LEAQ { 6962 break 6963 } 6964 off2 := auxIntToInt32(v_0.AuxInt) 6965 sym2 := auxToSym(v_0.Aux) 6966 base := v_0.Args[0] 6967 val := v_1 6968 mem := v_2 6969 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 6970 break 6971 } 6972 v.reset(OpAMD64CMPLload) 6973 v.AuxInt = int32ToAuxInt(off1 + off2) 6974 v.Aux = symToAux(mergeSym(sym1, sym2)) 6975 v.AddArg3(base, val, mem) 6976 return true 6977 } 6978 // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) 6979 // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) 6980 for { 6981 off := auxIntToInt32(v.AuxInt) 6982 sym := auxToSym(v.Aux) 6983 ptr := v_0 6984 if v_1.Op != OpAMD64MOVLconst { 6985 break 6986 } 6987 c := auxIntToInt32(v_1.AuxInt) 6988 mem := v_2 6989 v.reset(OpAMD64CMPLconstload) 6990 v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) 6991 v.Aux = symToAux(sym) 6992 v.AddArg2(ptr, mem) 6993 return true 6994 } 6995 return false 6996 } 6997 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { 6998 v_1 := v.Args[1] 6999 v_0 := v.Args[0] 7000 b := v.Block 7001 // match: (CMPQ x (MOVQconst [c])) 7002 // cond: is32Bit(c) 
7003 // result: (CMPQconst x [int32(c)]) 7004 for { 7005 x := v_0 7006 if v_1.Op != OpAMD64MOVQconst { 7007 break 7008 } 7009 c := auxIntToInt64(v_1.AuxInt) 7010 if !(is32Bit(c)) { 7011 break 7012 } 7013 v.reset(OpAMD64CMPQconst) 7014 v.AuxInt = int32ToAuxInt(int32(c)) 7015 v.AddArg(x) 7016 return true 7017 } 7018 // match: (CMPQ (MOVQconst [c]) x) 7019 // cond: is32Bit(c) 7020 // result: (InvertFlags (CMPQconst x [int32(c)])) 7021 for { 7022 if v_0.Op != OpAMD64MOVQconst { 7023 break 7024 } 7025 c := auxIntToInt64(v_0.AuxInt) 7026 x := v_1 7027 if !(is32Bit(c)) { 7028 break 7029 } 7030 v.reset(OpAMD64InvertFlags) 7031 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 7032 v0.AuxInt = int32ToAuxInt(int32(c)) 7033 v0.AddArg(x) 7034 v.AddArg(v0) 7035 return true 7036 } 7037 // match: (CMPQ x y) 7038 // cond: canonLessThan(x,y) 7039 // result: (InvertFlags (CMPQ y x)) 7040 for { 7041 x := v_0 7042 y := v_1 7043 if !(canonLessThan(x, y)) { 7044 break 7045 } 7046 v.reset(OpAMD64InvertFlags) 7047 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 7048 v0.AddArg2(y, x) 7049 v.AddArg(v0) 7050 return true 7051 } 7052 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7053 // cond: x==y 7054 // result: (FlagEQ) 7055 for { 7056 if v_0.Op != OpAMD64MOVQconst { 7057 break 7058 } 7059 x := auxIntToInt64(v_0.AuxInt) 7060 if v_1.Op != OpAMD64MOVQconst { 7061 break 7062 } 7063 y := auxIntToInt64(v_1.AuxInt) 7064 if !(x == y) { 7065 break 7066 } 7067 v.reset(OpAMD64FlagEQ) 7068 return true 7069 } 7070 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7071 // cond: x<y && uint64(x)<uint64(y) 7072 // result: (FlagLT_ULT) 7073 for { 7074 if v_0.Op != OpAMD64MOVQconst { 7075 break 7076 } 7077 x := auxIntToInt64(v_0.AuxInt) 7078 if v_1.Op != OpAMD64MOVQconst { 7079 break 7080 } 7081 y := auxIntToInt64(v_1.AuxInt) 7082 if !(x < y && uint64(x) < uint64(y)) { 7083 break 7084 } 7085 v.reset(OpAMD64FlagLT_ULT) 7086 return true 7087 } 7088 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7089 // cond: x<y && uint64(x)>uint64(y) 7090 // result: (FlagLT_UGT) 7091 for { 7092 if v_0.Op != OpAMD64MOVQconst { 7093 break 7094 } 7095 x := auxIntToInt64(v_0.AuxInt) 7096 if v_1.Op != OpAMD64MOVQconst { 7097 break 7098 } 7099 y := auxIntToInt64(v_1.AuxInt) 7100 if !(x < y && uint64(x) > uint64(y)) { 7101 break 7102 } 7103 v.reset(OpAMD64FlagLT_UGT) 7104 return true 7105 } 7106 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7107 // cond: x>y && uint64(x)<uint64(y) 7108 // result: (FlagGT_ULT) 7109 for { 7110 if v_0.Op != OpAMD64MOVQconst { 7111 break 7112 } 7113 x := auxIntToInt64(v_0.AuxInt) 7114 if v_1.Op != OpAMD64MOVQconst { 7115 break 7116 } 7117 y := auxIntToInt64(v_1.AuxInt) 7118 if !(x > y && uint64(x) < uint64(y)) { 7119 break 7120 } 7121 v.reset(OpAMD64FlagGT_ULT) 7122 return true 7123 } 7124 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) 7125 // cond: x>y && uint64(x)>uint64(y) 7126 // result: (FlagGT_UGT) 7127 for { 7128 if v_0.Op != OpAMD64MOVQconst { 7129 break 7130 } 7131 x := auxIntToInt64(v_0.AuxInt) 7132 if v_1.Op != OpAMD64MOVQconst { 7133 break 7134 } 7135 y := auxIntToInt64(v_1.AuxInt) 7136 if !(x > y && uint64(x) > uint64(y)) { 7137 break 7138 } 7139 v.reset(OpAMD64FlagGT_UGT) 7140 return true 7141 } 7142 // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x) 7143 // cond: canMergeLoad(v, l) && clobber(l) 7144 // result: (CMPQload {sym} [off] ptr x mem) 7145 for { 7146 l := v_0 7147 if l.Op != OpAMD64MOVQload { 7148 break 7149 } 7150 off := auxIntToInt32(l.AuxInt) 7151 sym := auxToSym(l.Aux) 7152 
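// canMergeLoad checks, roughly, that the load has this compare as its
// only user and that no memory operation intervenes, so it is safe to
// fold into CMPQload; clobber then marks the original load dead.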
mem := l.Args[1] 7153 ptr := l.Args[0] 7154 x := v_1 7155 if !(canMergeLoad(v, l) && clobber(l)) { 7156 break 7157 } 7158 v.reset(OpAMD64CMPQload) 7159 v.AuxInt = int32ToAuxInt(off) 7160 v.Aux = symToAux(sym) 7161 v.AddArg3(ptr, x, mem) 7162 return true 7163 } 7164 // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem)) 7165 // cond: canMergeLoad(v, l) && clobber(l) 7166 // result: (InvertFlags (CMPQload {sym} [off] ptr x mem)) 7167 for { 7168 x := v_0 7169 l := v_1 7170 if l.Op != OpAMD64MOVQload { 7171 break 7172 } 7173 off := auxIntToInt32(l.AuxInt) 7174 sym := auxToSym(l.Aux) 7175 mem := l.Args[1] 7176 ptr := l.Args[0] 7177 if !(canMergeLoad(v, l) && clobber(l)) { 7178 break 7179 } 7180 v.reset(OpAMD64InvertFlags) 7181 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags) 7182 v0.AuxInt = int32ToAuxInt(off) 7183 v0.Aux = symToAux(sym) 7184 v0.AddArg3(ptr, x, mem) 7185 v.AddArg(v0) 7186 return true 7187 } 7188 return false 7189 } 7190 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { 7191 v_0 := v.Args[0] 7192 b := v.Block 7193 // match: (CMPQconst (MOVQconst [x]) [y]) 7194 // cond: x==int64(y) 7195 // result: (FlagEQ) 7196 for { 7197 y := auxIntToInt32(v.AuxInt) 7198 if v_0.Op != OpAMD64MOVQconst { 7199 break 7200 } 7201 x := auxIntToInt64(v_0.AuxInt) 7202 if !(x == int64(y)) { 7203 break 7204 } 7205 v.reset(OpAMD64FlagEQ) 7206 return true 7207 } 7208 // match: (CMPQconst (MOVQconst [x]) [y]) 7209 // cond: x<int64(y) && uint64(x)<uint64(int64(y)) 7210 // result: (FlagLT_ULT) 7211 for { 7212 y := auxIntToInt32(v.AuxInt) 7213 if v_0.Op != OpAMD64MOVQconst { 7214 break 7215 } 7216 x := auxIntToInt64(v_0.AuxInt) 7217 if !(x < int64(y) && uint64(x) < uint64(int64(y))) { 7218 break 7219 } 7220 v.reset(OpAMD64FlagLT_ULT) 7221 return true 7222 } 7223 // match: (CMPQconst (MOVQconst [x]) [y]) 7224 // cond: x<int64(y) && uint64(x)>uint64(int64(y)) 7225 // result: (FlagLT_UGT) 7226 for { 7227 y := auxIntToInt32(v.AuxInt) 7228 if v_0.Op != OpAMD64MOVQconst { 7229 break 7230 } 7231 x := auxIntToInt64(v_0.AuxInt) 7232 if !(x < int64(y) && uint64(x) > uint64(int64(y))) { 7233 break 7234 } 7235 v.reset(OpAMD64FlagLT_UGT) 7236 return true 7237 } 7238 // match: (CMPQconst (MOVQconst [x]) [y]) 7239 // cond: x>int64(y) && uint64(x)<uint64(int64(y)) 7240 // result: (FlagGT_ULT) 7241 for { 7242 y := auxIntToInt32(v.AuxInt) 7243 if v_0.Op != OpAMD64MOVQconst { 7244 break 7245 } 7246 x := auxIntToInt64(v_0.AuxInt) 7247 if !(x > int64(y) && uint64(x) < uint64(int64(y))) { 7248 break 7249 } 7250 v.reset(OpAMD64FlagGT_ULT) 7251 return true 7252 } 7253 // match: (CMPQconst (MOVQconst [x]) [y]) 7254 // cond: x>int64(y) && uint64(x)>uint64(int64(y)) 7255 // result: (FlagGT_UGT) 7256 for { 7257 y := auxIntToInt32(v.AuxInt) 7258 if v_0.Op != OpAMD64MOVQconst { 7259 break 7260 } 7261 x := auxIntToInt64(v_0.AuxInt) 7262 if !(x > int64(y) && uint64(x) > uint64(int64(y))) { 7263 break 7264 } 7265 v.reset(OpAMD64FlagGT_UGT) 7266 return true 7267 } 7268 // match: (CMPQconst (MOVBQZX _) [c]) 7269 // cond: 0xFF < c 7270 // result: (FlagLT_ULT) 7271 for { 7272 c := auxIntToInt32(v.AuxInt) 7273 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) { 7274 break 7275 } 7276 v.reset(OpAMD64FlagLT_ULT) 7277 return true 7278 } 7279 // match: (CMPQconst (MOVWQZX _) [c]) 7280 // cond: 0xFFFF < c 7281 // result: (FlagLT_ULT) 7282 for { 7283 c := auxIntToInt32(v.AuxInt) 7284 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) { 7285 break 7286 } 7287 v.reset(OpAMD64FlagLT_ULT) 7288 return true 7289 } 7290 // match: (CMPQconst (SHRQconst _ [c]) 
[n]) 7291 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 7292 // result: (FlagLT_ULT) 7293 for { 7294 n := auxIntToInt32(v.AuxInt) 7295 if v_0.Op != OpAMD64SHRQconst { 7296 break 7297 } 7298 c := auxIntToInt8(v_0.AuxInt) 7299 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 7300 break 7301 } 7302 v.reset(OpAMD64FlagLT_ULT) 7303 return true 7304 } 7305 // match: (CMPQconst (ANDQconst _ [m]) [n]) 7306 // cond: 0 <= m && m < n 7307 // result: (FlagLT_ULT) 7308 for { 7309 n := auxIntToInt32(v.AuxInt) 7310 if v_0.Op != OpAMD64ANDQconst { 7311 break 7312 } 7313 m := auxIntToInt32(v_0.AuxInt) 7314 if !(0 <= m && m < n) { 7315 break 7316 } 7317 v.reset(OpAMD64FlagLT_ULT) 7318 return true 7319 } 7320 // match: (CMPQconst (ANDLconst _ [m]) [n]) 7321 // cond: 0 <= m && m < n 7322 // result: (FlagLT_ULT) 7323 for { 7324 n := auxIntToInt32(v.AuxInt) 7325 if v_0.Op != OpAMD64ANDLconst { 7326 break 7327 } 7328 m := auxIntToInt32(v_0.AuxInt) 7329 if !(0 <= m && m < n) { 7330 break 7331 } 7332 v.reset(OpAMD64FlagLT_ULT) 7333 return true 7334 } 7335 // match: (CMPQconst a:(ANDQ x y) [0]) 7336 // cond: a.Uses == 1 7337 // result: (TESTQ x y) 7338 for { 7339 if auxIntToInt32(v.AuxInt) != 0 { 7340 break 7341 } 7342 a := v_0 7343 if a.Op != OpAMD64ANDQ { 7344 break 7345 } 7346 y := a.Args[1] 7347 x := a.Args[0] 7348 if !(a.Uses == 1) { 7349 break 7350 } 7351 v.reset(OpAMD64TESTQ) 7352 v.AddArg2(x, y) 7353 return true 7354 } 7355 // match: (CMPQconst a:(ANDQconst [c] x) [0]) 7356 // cond: a.Uses == 1 7357 // result: (TESTQconst [c] x) 7358 for { 7359 if auxIntToInt32(v.AuxInt) != 0 { 7360 break 7361 } 7362 a := v_0 7363 if a.Op != OpAMD64ANDQconst { 7364 break 7365 } 7366 c := auxIntToInt32(a.AuxInt) 7367 x := a.Args[0] 7368 if !(a.Uses == 1) { 7369 break 7370 } 7371 v.reset(OpAMD64TESTQconst) 7372 v.AuxInt = int32ToAuxInt(c) 7373 v.AddArg(x) 7374 return true 7375 } 7376 // match: (CMPQconst x [0]) 7377 // result: (TESTQ x x) 7378 for { 7379 if auxIntToInt32(v.AuxInt) != 0 { 7380 break 7381 } 7382 x := v_0 7383 v.reset(OpAMD64TESTQ) 7384 v.AddArg2(x, x) 7385 return true 7386 } 7387 // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c]) 7388 // cond: l.Uses == 1 && clobber(l) 7389 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem) 7390 for { 7391 c := auxIntToInt32(v.AuxInt) 7392 l := v_0 7393 if l.Op != OpAMD64MOVQload { 7394 break 7395 } 7396 off := auxIntToInt32(l.AuxInt) 7397 sym := auxToSym(l.Aux) 7398 mem := l.Args[1] 7399 ptr := l.Args[0] 7400 if !(l.Uses == 1 && clobber(l)) { 7401 break 7402 } 7403 b = l.Block 7404 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) 7405 v.copyOf(v0) 7406 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) 7407 v0.Aux = symToAux(sym) 7408 v0.AddArg2(ptr, mem) 7409 return true 7410 } 7411 return false 7412 } 7413 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool { 7414 v_1 := v.Args[1] 7415 v_0 := v.Args[0] 7416 // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 7417 // cond: ValAndOff(valoff1).canAdd32(off2) 7418 // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) 7419 for { 7420 valoff1 := auxIntToValAndOff(v.AuxInt) 7421 sym := auxToSym(v.Aux) 7422 if v_0.Op != OpAMD64ADDQconst { 7423 break 7424 } 7425 off2 := auxIntToInt32(v_0.AuxInt) 7426 base := v_0.Args[0] 7427 mem := v_1 7428 if !(ValAndOff(valoff1).canAdd32(off2)) { 7429 break 7430 } 7431 v.reset(OpAMD64CMPQconstload) 7432 v.AuxInt = 
valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 7433 v.Aux = symToAux(sym) 7434 v.AddArg2(base, mem) 7435 return true 7436 } 7437 // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 7438 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) 7439 // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) 7440 for { 7441 valoff1 := auxIntToValAndOff(v.AuxInt) 7442 sym1 := auxToSym(v.Aux) 7443 if v_0.Op != OpAMD64LEAQ { 7444 break 7445 } 7446 off2 := auxIntToInt32(v_0.AuxInt) 7447 sym2 := auxToSym(v_0.Aux) 7448 base := v_0.Args[0] 7449 mem := v_1 7450 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { 7451 break 7452 } 7453 v.reset(OpAMD64CMPQconstload) 7454 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 7455 v.Aux = symToAux(mergeSym(sym1, sym2)) 7456 v.AddArg2(base, mem) 7457 return true 7458 } 7459 return false 7460 } 7461 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { 7462 v_2 := v.Args[2] 7463 v_1 := v.Args[1] 7464 v_0 := v.Args[0] 7465 // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem) 7466 // cond: is32Bit(int64(off1)+int64(off2)) 7467 // result: (CMPQload [off1+off2] {sym} base val mem) 7468 for { 7469 off1 := auxIntToInt32(v.AuxInt) 7470 sym := auxToSym(v.Aux) 7471 if v_0.Op != OpAMD64ADDQconst { 7472 break 7473 } 7474 off2 := auxIntToInt32(v_0.AuxInt) 7475 base := v_0.Args[0] 7476 val := v_1 7477 mem := v_2 7478 if !(is32Bit(int64(off1) + int64(off2))) { 7479 break 7480 } 7481 v.reset(OpAMD64CMPQload) 7482 v.AuxInt = int32ToAuxInt(off1 + off2) 7483 v.Aux = symToAux(sym) 7484 v.AddArg3(base, val, mem) 7485 return true 7486 } 7487 // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7488 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 7489 // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7490 for { 7491 off1 := auxIntToInt32(v.AuxInt) 7492 sym1 := auxToSym(v.Aux) 7493 if v_0.Op != OpAMD64LEAQ { 7494 break 7495 } 7496 off2 := auxIntToInt32(v_0.AuxInt) 7497 sym2 := auxToSym(v_0.Aux) 7498 base := v_0.Args[0] 7499 val := v_1 7500 mem := v_2 7501 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 7502 break 7503 } 7504 v.reset(OpAMD64CMPQload) 7505 v.AuxInt = int32ToAuxInt(off1 + off2) 7506 v.Aux = symToAux(mergeSym(sym1, sym2)) 7507 v.AddArg3(base, val, mem) 7508 return true 7509 } 7510 // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) 7511 // cond: validVal(c) 7512 // result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) 7513 for { 7514 off := auxIntToInt32(v.AuxInt) 7515 sym := auxToSym(v.Aux) 7516 ptr := v_0 7517 if v_1.Op != OpAMD64MOVQconst { 7518 break 7519 } 7520 c := auxIntToInt64(v_1.AuxInt) 7521 mem := v_2 7522 if !(validVal(c)) { 7523 break 7524 } 7525 v.reset(OpAMD64CMPQconstload) 7526 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 7527 v.Aux = symToAux(sym) 7528 v.AddArg2(ptr, mem) 7529 return true 7530 } 7531 return false 7532 } 7533 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { 7534 v_1 := v.Args[1] 7535 v_0 := v.Args[0] 7536 b := v.Block 7537 // match: (CMPW x (MOVLconst [c])) 7538 // result: (CMPWconst x [int16(c)]) 7539 for { 7540 x := v_0 7541 if v_1.Op != OpAMD64MOVLconst { 7542 break 7543 } 7544 c := auxIntToInt32(v_1.AuxInt) 7545 v.reset(OpAMD64CMPWconst) 7546 v.AuxInt = int16ToAuxInt(int16(c)) 7547 v.AddArg(x) 7548 return true 7549 } 7550 // match: (CMPW (MOVLconst [c]) x) 7551 // result: (InvertFlags (CMPWconst 
x [int16(c)])) 7552 for { 7553 if v_0.Op != OpAMD64MOVLconst { 7554 break 7555 } 7556 c := auxIntToInt32(v_0.AuxInt) 7557 x := v_1 7558 v.reset(OpAMD64InvertFlags) 7559 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 7560 v0.AuxInt = int16ToAuxInt(int16(c)) 7561 v0.AddArg(x) 7562 v.AddArg(v0) 7563 return true 7564 } 7565 // match: (CMPW x y) 7566 // cond: canonLessThan(x,y) 7567 // result: (InvertFlags (CMPW y x)) 7568 for { 7569 x := v_0 7570 y := v_1 7571 if !(canonLessThan(x, y)) { 7572 break 7573 } 7574 v.reset(OpAMD64InvertFlags) 7575 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 7576 v0.AddArg2(y, x) 7577 v.AddArg(v0) 7578 return true 7579 } 7580 // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x) 7581 // cond: canMergeLoad(v, l) && clobber(l) 7582 // result: (CMPWload {sym} [off] ptr x mem) 7583 for { 7584 l := v_0 7585 if l.Op != OpAMD64MOVWload { 7586 break 7587 } 7588 off := auxIntToInt32(l.AuxInt) 7589 sym := auxToSym(l.Aux) 7590 mem := l.Args[1] 7591 ptr := l.Args[0] 7592 x := v_1 7593 if !(canMergeLoad(v, l) && clobber(l)) { 7594 break 7595 } 7596 v.reset(OpAMD64CMPWload) 7597 v.AuxInt = int32ToAuxInt(off) 7598 v.Aux = symToAux(sym) 7599 v.AddArg3(ptr, x, mem) 7600 return true 7601 } 7602 // match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) 7603 // cond: canMergeLoad(v, l) && clobber(l) 7604 // result: (InvertFlags (CMPWload {sym} [off] ptr x mem)) 7605 for { 7606 x := v_0 7607 l := v_1 7608 if l.Op != OpAMD64MOVWload { 7609 break 7610 } 7611 off := auxIntToInt32(l.AuxInt) 7612 sym := auxToSym(l.Aux) 7613 mem := l.Args[1] 7614 ptr := l.Args[0] 7615 if !(canMergeLoad(v, l) && clobber(l)) { 7616 break 7617 } 7618 v.reset(OpAMD64InvertFlags) 7619 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags) 7620 v0.AuxInt = int32ToAuxInt(off) 7621 v0.Aux = symToAux(sym) 7622 v0.AddArg3(ptr, x, mem) 7623 v.AddArg(v0) 7624 return true 7625 } 7626 return false 7627 } 7628 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { 7629 v_0 := v.Args[0] 7630 b := v.Block 7631 // match: (CMPWconst (MOVLconst [x]) [y]) 7632 // cond: int16(x)==y 7633 // result: (FlagEQ) 7634 for { 7635 y := auxIntToInt16(v.AuxInt) 7636 if v_0.Op != OpAMD64MOVLconst { 7637 break 7638 } 7639 x := auxIntToInt32(v_0.AuxInt) 7640 if !(int16(x) == y) { 7641 break 7642 } 7643 v.reset(OpAMD64FlagEQ) 7644 return true 7645 } 7646 // match: (CMPWconst (MOVLconst [x]) [y]) 7647 // cond: int16(x)<y && uint16(x)<uint16(y) 7648 // result: (FlagLT_ULT) 7649 for { 7650 y := auxIntToInt16(v.AuxInt) 7651 if v_0.Op != OpAMD64MOVLconst { 7652 break 7653 } 7654 x := auxIntToInt32(v_0.AuxInt) 7655 if !(int16(x) < y && uint16(x) < uint16(y)) { 7656 break 7657 } 7658 v.reset(OpAMD64FlagLT_ULT) 7659 return true 7660 } 7661 // match: (CMPWconst (MOVLconst [x]) [y]) 7662 // cond: int16(x)<y && uint16(x)>uint16(y) 7663 // result: (FlagLT_UGT) 7664 for { 7665 y := auxIntToInt16(v.AuxInt) 7666 if v_0.Op != OpAMD64MOVLconst { 7667 break 7668 } 7669 x := auxIntToInt32(v_0.AuxInt) 7670 if !(int16(x) < y && uint16(x) > uint16(y)) { 7671 break 7672 } 7673 v.reset(OpAMD64FlagLT_UGT) 7674 return true 7675 } 7676 // match: (CMPWconst (MOVLconst [x]) [y]) 7677 // cond: int16(x)>y && uint16(x)<uint16(y) 7678 // result: (FlagGT_ULT) 7679 for { 7680 y := auxIntToInt16(v.AuxInt) 7681 if v_0.Op != OpAMD64MOVLconst { 7682 break 7683 } 7684 x := auxIntToInt32(v_0.AuxInt) 7685 if !(int16(x) > y && uint16(x) < uint16(y)) { 7686 break 7687 } 7688 v.reset(OpAMD64FlagGT_ULT) 7689 return true 7690 } 7691 // match: (CMPWconst (MOVLconst [x]) 
[y]) 7692 // cond: int16(x)>y && uint16(x)>uint16(y) 7693 // result: (FlagGT_UGT) 7694 for { 7695 y := auxIntToInt16(v.AuxInt) 7696 if v_0.Op != OpAMD64MOVLconst { 7697 break 7698 } 7699 x := auxIntToInt32(v_0.AuxInt) 7700 if !(int16(x) > y && uint16(x) > uint16(y)) { 7701 break 7702 } 7703 v.reset(OpAMD64FlagGT_UGT) 7704 return true 7705 } 7706 // match: (CMPWconst (ANDLconst _ [m]) [n]) 7707 // cond: 0 <= int16(m) && int16(m) < n 7708 // result: (FlagLT_ULT) 7709 for { 7710 n := auxIntToInt16(v.AuxInt) 7711 if v_0.Op != OpAMD64ANDLconst { 7712 break 7713 } 7714 m := auxIntToInt32(v_0.AuxInt) 7715 if !(0 <= int16(m) && int16(m) < n) { 7716 break 7717 } 7718 v.reset(OpAMD64FlagLT_ULT) 7719 return true 7720 } 7721 // match: (CMPWconst a:(ANDL x y) [0]) 7722 // cond: a.Uses == 1 7723 // result: (TESTW x y) 7724 for { 7725 if auxIntToInt16(v.AuxInt) != 0 { 7726 break 7727 } 7728 a := v_0 7729 if a.Op != OpAMD64ANDL { 7730 break 7731 } 7732 y := a.Args[1] 7733 x := a.Args[0] 7734 if !(a.Uses == 1) { 7735 break 7736 } 7737 v.reset(OpAMD64TESTW) 7738 v.AddArg2(x, y) 7739 return true 7740 } 7741 // match: (CMPWconst a:(ANDLconst [c] x) [0]) 7742 // cond: a.Uses == 1 7743 // result: (TESTWconst [int16(c)] x) 7744 for { 7745 if auxIntToInt16(v.AuxInt) != 0 { 7746 break 7747 } 7748 a := v_0 7749 if a.Op != OpAMD64ANDLconst { 7750 break 7751 } 7752 c := auxIntToInt32(a.AuxInt) 7753 x := a.Args[0] 7754 if !(a.Uses == 1) { 7755 break 7756 } 7757 v.reset(OpAMD64TESTWconst) 7758 v.AuxInt = int16ToAuxInt(int16(c)) 7759 v.AddArg(x) 7760 return true 7761 } 7762 // match: (CMPWconst x [0]) 7763 // result: (TESTW x x) 7764 for { 7765 if auxIntToInt16(v.AuxInt) != 0 { 7766 break 7767 } 7768 x := v_0 7769 v.reset(OpAMD64TESTW) 7770 v.AddArg2(x, x) 7771 return true 7772 } 7773 // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) 7774 // cond: l.Uses == 1 && clobber(l) 7775 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) 7776 for { 7777 c := auxIntToInt16(v.AuxInt) 7778 l := v_0 7779 if l.Op != OpAMD64MOVWload { 7780 break 7781 } 7782 off := auxIntToInt32(l.AuxInt) 7783 sym := auxToSym(l.Aux) 7784 mem := l.Args[1] 7785 ptr := l.Args[0] 7786 if !(l.Uses == 1 && clobber(l)) { 7787 break 7788 } 7789 b = l.Block 7790 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) 7791 v.copyOf(v0) 7792 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 7793 v0.Aux = symToAux(sym) 7794 v0.AddArg2(ptr, mem) 7795 return true 7796 } 7797 return false 7798 } 7799 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool { 7800 v_1 := v.Args[1] 7801 v_0 := v.Args[0] 7802 // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem) 7803 // cond: ValAndOff(valoff1).canAdd32(off2) 7804 // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) 7805 for { 7806 valoff1 := auxIntToValAndOff(v.AuxInt) 7807 sym := auxToSym(v.Aux) 7808 if v_0.Op != OpAMD64ADDQconst { 7809 break 7810 } 7811 off2 := auxIntToInt32(v_0.AuxInt) 7812 base := v_0.Args[0] 7813 mem := v_1 7814 if !(ValAndOff(valoff1).canAdd32(off2)) { 7815 break 7816 } 7817 v.reset(OpAMD64CMPWconstload) 7818 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 7819 v.Aux = symToAux(sym) 7820 v.AddArg2(base, mem) 7821 return true 7822 } 7823 // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) 7824 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) 7825 // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base 
mem) 7826 for { 7827 valoff1 := auxIntToValAndOff(v.AuxInt) 7828 sym1 := auxToSym(v.Aux) 7829 if v_0.Op != OpAMD64LEAQ { 7830 break 7831 } 7832 off2 := auxIntToInt32(v_0.AuxInt) 7833 sym2 := auxToSym(v_0.Aux) 7834 base := v_0.Args[0] 7835 mem := v_1 7836 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { 7837 break 7838 } 7839 v.reset(OpAMD64CMPWconstload) 7840 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) 7841 v.Aux = symToAux(mergeSym(sym1, sym2)) 7842 v.AddArg2(base, mem) 7843 return true 7844 } 7845 return false 7846 } 7847 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { 7848 v_2 := v.Args[2] 7849 v_1 := v.Args[1] 7850 v_0 := v.Args[0] 7851 // match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem) 7852 // cond: is32Bit(int64(off1)+int64(off2)) 7853 // result: (CMPWload [off1+off2] {sym} base val mem) 7854 for { 7855 off1 := auxIntToInt32(v.AuxInt) 7856 sym := auxToSym(v.Aux) 7857 if v_0.Op != OpAMD64ADDQconst { 7858 break 7859 } 7860 off2 := auxIntToInt32(v_0.AuxInt) 7861 base := v_0.Args[0] 7862 val := v_1 7863 mem := v_2 7864 if !(is32Bit(int64(off1) + int64(off2))) { 7865 break 7866 } 7867 v.reset(OpAMD64CMPWload) 7868 v.AuxInt = int32ToAuxInt(off1 + off2) 7869 v.Aux = symToAux(sym) 7870 v.AddArg3(base, val, mem) 7871 return true 7872 } 7873 // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 7874 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 7875 // result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem) 7876 for { 7877 off1 := auxIntToInt32(v.AuxInt) 7878 sym1 := auxToSym(v.Aux) 7879 if v_0.Op != OpAMD64LEAQ { 7880 break 7881 } 7882 off2 := auxIntToInt32(v_0.AuxInt) 7883 sym2 := auxToSym(v_0.Aux) 7884 base := v_0.Args[0] 7885 val := v_1 7886 mem := v_2 7887 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 7888 break 7889 } 7890 v.reset(OpAMD64CMPWload) 7891 v.AuxInt = int32ToAuxInt(off1 + off2) 7892 v.Aux = symToAux(mergeSym(sym1, sym2)) 7893 v.AddArg3(base, val, mem) 7894 return true 7895 } 7896 // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) 7897 // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) 7898 for { 7899 off := auxIntToInt32(v.AuxInt) 7900 sym := auxToSym(v.Aux) 7901 ptr := v_0 7902 if v_1.Op != OpAMD64MOVLconst { 7903 break 7904 } 7905 c := auxIntToInt32(v_1.AuxInt) 7906 mem := v_2 7907 v.reset(OpAMD64CMPWconstload) 7908 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) 7909 v.Aux = symToAux(sym) 7910 v.AddArg2(ptr, mem) 7911 return true 7912 } 7913 return false 7914 } 7915 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool { 7916 v_3 := v.Args[3] 7917 v_2 := v.Args[2] 7918 v_1 := v.Args[1] 7919 v_0 := v.Args[0] 7920 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 7921 // cond: is32Bit(int64(off1)+int64(off2)) 7922 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 7923 for { 7924 off1 := auxIntToInt32(v.AuxInt) 7925 sym := auxToSym(v.Aux) 7926 if v_0.Op != OpAMD64ADDQconst { 7927 break 7928 } 7929 off2 := auxIntToInt32(v_0.AuxInt) 7930 ptr := v_0.Args[0] 7931 old := v_1 7932 new_ := v_2 7933 mem := v_3 7934 if !(is32Bit(int64(off1) + int64(off2))) { 7935 break 7936 } 7937 v.reset(OpAMD64CMPXCHGLlock) 7938 v.AuxInt = int32ToAuxInt(off1 + off2) 7939 v.Aux = symToAux(sym) 7940 v.AddArg4(ptr, old, new_, mem) 7941 return true 7942 } 7943 return false 7944 } 7945 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool { 7946 v_3 := v.Args[3] 7947 v_2 := 
v.Args[2] 7948 v_1 := v.Args[1] 7949 v_0 := v.Args[0] 7950 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 7951 // cond: is32Bit(int64(off1)+int64(off2)) 7952 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 7953 for { 7954 off1 := auxIntToInt32(v.AuxInt) 7955 sym := auxToSym(v.Aux) 7956 if v_0.Op != OpAMD64ADDQconst { 7957 break 7958 } 7959 off2 := auxIntToInt32(v_0.AuxInt) 7960 ptr := v_0.Args[0] 7961 old := v_1 7962 new_ := v_2 7963 mem := v_3 7964 if !(is32Bit(int64(off1) + int64(off2))) { 7965 break 7966 } 7967 v.reset(OpAMD64CMPXCHGQlock) 7968 v.AuxInt = int32ToAuxInt(off1 + off2) 7969 v.Aux = symToAux(sym) 7970 v.AddArg4(ptr, old, new_, mem) 7971 return true 7972 } 7973 return false 7974 } 7975 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool { 7976 v_1 := v.Args[1] 7977 v_0 := v.Args[0] 7978 // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem)) 7979 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 7980 // result: (DIVSDload x [off] {sym} ptr mem) 7981 for { 7982 x := v_0 7983 l := v_1 7984 if l.Op != OpAMD64MOVSDload { 7985 break 7986 } 7987 off := auxIntToInt32(l.AuxInt) 7988 sym := auxToSym(l.Aux) 7989 mem := l.Args[1] 7990 ptr := l.Args[0] 7991 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 7992 break 7993 } 7994 v.reset(OpAMD64DIVSDload) 7995 v.AuxInt = int32ToAuxInt(off) 7996 v.Aux = symToAux(sym) 7997 v.AddArg3(x, ptr, mem) 7998 return true 7999 } 8000 return false 8001 } 8002 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool { 8003 v_2 := v.Args[2] 8004 v_1 := v.Args[1] 8005 v_0 := v.Args[0] 8006 // match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem) 8007 // cond: is32Bit(int64(off1)+int64(off2)) 8008 // result: (DIVSDload [off1+off2] {sym} val base mem) 8009 for { 8010 off1 := auxIntToInt32(v.AuxInt) 8011 sym := auxToSym(v.Aux) 8012 val := v_0 8013 if v_1.Op != OpAMD64ADDQconst { 8014 break 8015 } 8016 off2 := auxIntToInt32(v_1.AuxInt) 8017 base := v_1.Args[0] 8018 mem := v_2 8019 if !(is32Bit(int64(off1) + int64(off2))) { 8020 break 8021 } 8022 v.reset(OpAMD64DIVSDload) 8023 v.AuxInt = int32ToAuxInt(off1 + off2) 8024 v.Aux = symToAux(sym) 8025 v.AddArg3(val, base, mem) 8026 return true 8027 } 8028 // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 8029 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8030 // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 8031 for { 8032 off1 := auxIntToInt32(v.AuxInt) 8033 sym1 := auxToSym(v.Aux) 8034 val := v_0 8035 if v_1.Op != OpAMD64LEAQ { 8036 break 8037 } 8038 off2 := auxIntToInt32(v_1.AuxInt) 8039 sym2 := auxToSym(v_1.Aux) 8040 base := v_1.Args[0] 8041 mem := v_2 8042 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 8043 break 8044 } 8045 v.reset(OpAMD64DIVSDload) 8046 v.AuxInt = int32ToAuxInt(off1 + off2) 8047 v.Aux = symToAux(mergeSym(sym1, sym2)) 8048 v.AddArg3(val, base, mem) 8049 return true 8050 } 8051 return false 8052 } 8053 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool { 8054 v_1 := v.Args[1] 8055 v_0 := v.Args[0] 8056 // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem)) 8057 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 8058 // result: (DIVSSload x [off] {sym} ptr mem) 8059 for { 8060 x := v_0 8061 l := v_1 8062 if l.Op != OpAMD64MOVSSload { 8063 break 8064 } 8065 off := auxIntToInt32(l.AuxInt) 8066 sym := auxToSym(l.Aux) 8067 mem := l.Args[1] 8068 ptr := l.Args[0] 8069 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 8070 break 8071 } 8072 v.reset(OpAMD64DIVSSload) 
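// Unlike the CMP folds above, the DIVSS/DIVSD folds use canMergeLoadClobber
// rather than canMergeLoad: the two-operand SSE divide writes its result over
// the register holding x, so merging the load is only safe when this divide
// is x's sole use (and x is not live in a deeper loop, since no liveness
// information is available at this point). A simplified sketch of that
// helper, assuming the rewrite.go version:
//
//	func canMergeLoadClobber(target, load, x *Value) bool {
//		if x.Uses != 1 {
//			return false // x is still needed elsewhere; don't clobber it
//		}
//		return canMergeLoad(target, load)
//	}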
8073 v.AuxInt = int32ToAuxInt(off) 8074 v.Aux = symToAux(sym) 8075 v.AddArg3(x, ptr, mem) 8076 return true 8077 } 8078 return false 8079 } 8080 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool { 8081 v_2 := v.Args[2] 8082 v_1 := v.Args[1] 8083 v_0 := v.Args[0] 8084 // match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem) 8085 // cond: is32Bit(int64(off1)+int64(off2)) 8086 // result: (DIVSSload [off1+off2] {sym} val base mem) 8087 for { 8088 off1 := auxIntToInt32(v.AuxInt) 8089 sym := auxToSym(v.Aux) 8090 val := v_0 8091 if v_1.Op != OpAMD64ADDQconst { 8092 break 8093 } 8094 off2 := auxIntToInt32(v_1.AuxInt) 8095 base := v_1.Args[0] 8096 mem := v_2 8097 if !(is32Bit(int64(off1) + int64(off2))) { 8098 break 8099 } 8100 v.reset(OpAMD64DIVSSload) 8101 v.AuxInt = int32ToAuxInt(off1 + off2) 8102 v.Aux = symToAux(sym) 8103 v.AddArg3(val, base, mem) 8104 return true 8105 } 8106 // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 8107 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8108 // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 8109 for { 8110 off1 := auxIntToInt32(v.AuxInt) 8111 sym1 := auxToSym(v.Aux) 8112 val := v_0 8113 if v_1.Op != OpAMD64LEAQ { 8114 break 8115 } 8116 off2 := auxIntToInt32(v_1.AuxInt) 8117 sym2 := auxToSym(v_1.Aux) 8118 base := v_1.Args[0] 8119 mem := v_2 8120 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 8121 break 8122 } 8123 v.reset(OpAMD64DIVSSload) 8124 v.AuxInt = int32ToAuxInt(off1 + off2) 8125 v.Aux = symToAux(mergeSym(sym1, sym2)) 8126 v.AddArg3(val, base, mem) 8127 return true 8128 } 8129 return false 8130 } 8131 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool { 8132 v_1 := v.Args[1] 8133 v_0 := v.Args[0] 8134 // match: (HMULL x y) 8135 // cond: !x.rematerializeable() && y.rematerializeable() 8136 // result: (HMULL y x) 8137 for { 8138 x := v_0 8139 y := v_1 8140 if !(!x.rematerializeable() && y.rematerializeable()) { 8141 break 8142 } 8143 v.reset(OpAMD64HMULL) 8144 v.AddArg2(y, x) 8145 return true 8146 } 8147 return false 8148 } 8149 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool { 8150 v_1 := v.Args[1] 8151 v_0 := v.Args[0] 8152 // match: (HMULLU x y) 8153 // cond: !x.rematerializeable() && y.rematerializeable() 8154 // result: (HMULLU y x) 8155 for { 8156 x := v_0 8157 y := v_1 8158 if !(!x.rematerializeable() && y.rematerializeable()) { 8159 break 8160 } 8161 v.reset(OpAMD64HMULLU) 8162 v.AddArg2(y, x) 8163 return true 8164 } 8165 return false 8166 } 8167 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool { 8168 v_1 := v.Args[1] 8169 v_0 := v.Args[0] 8170 // match: (HMULQ x y) 8171 // cond: !x.rematerializeable() && y.rematerializeable() 8172 // result: (HMULQ y x) 8173 for { 8174 x := v_0 8175 y := v_1 8176 if !(!x.rematerializeable() && y.rematerializeable()) { 8177 break 8178 } 8179 v.reset(OpAMD64HMULQ) 8180 v.AddArg2(y, x) 8181 return true 8182 } 8183 return false 8184 } 8185 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool { 8186 v_1 := v.Args[1] 8187 v_0 := v.Args[0] 8188 // match: (HMULQU x y) 8189 // cond: !x.rematerializeable() && y.rematerializeable() 8190 // result: (HMULQU y x) 8191 for { 8192 x := v_0 8193 y := v_1 8194 if !(!x.rematerializeable() && y.rematerializeable()) { 8195 break 8196 } 8197 v.reset(OpAMD64HMULQU) 8198 v.AddArg2(y, x) 8199 return true 8200 } 8201 return false 8202 } 8203 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool { 8204 v_0 := v.Args[0] 8205 // match: (LEAL [c] {s} (ADDLconst [d] x)) 8206 // cond: 
is32Bit(int64(c)+int64(d)) 8207 // result: (LEAL [c+d] {s} x) 8208 for { 8209 c := auxIntToInt32(v.AuxInt) 8210 s := auxToSym(v.Aux) 8211 if v_0.Op != OpAMD64ADDLconst { 8212 break 8213 } 8214 d := auxIntToInt32(v_0.AuxInt) 8215 x := v_0.Args[0] 8216 if !(is32Bit(int64(c) + int64(d))) { 8217 break 8218 } 8219 v.reset(OpAMD64LEAL) 8220 v.AuxInt = int32ToAuxInt(c + d) 8221 v.Aux = symToAux(s) 8222 v.AddArg(x) 8223 return true 8224 } 8225 // match: (LEAL [c] {s} (ADDL x y)) 8226 // cond: x.Op != OpSB && y.Op != OpSB 8227 // result: (LEAL1 [c] {s} x y) 8228 for { 8229 c := auxIntToInt32(v.AuxInt) 8230 s := auxToSym(v.Aux) 8231 if v_0.Op != OpAMD64ADDL { 8232 break 8233 } 8234 _ = v_0.Args[1] 8235 v_0_0 := v_0.Args[0] 8236 v_0_1 := v_0.Args[1] 8237 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 8238 x := v_0_0 8239 y := v_0_1 8240 if !(x.Op != OpSB && y.Op != OpSB) { 8241 continue 8242 } 8243 v.reset(OpAMD64LEAL1) 8244 v.AuxInt = int32ToAuxInt(c) 8245 v.Aux = symToAux(s) 8246 v.AddArg2(x, y) 8247 return true 8248 } 8249 break 8250 } 8251 return false 8252 } 8253 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { 8254 v_1 := v.Args[1] 8255 v_0 := v.Args[0] 8256 // match: (LEAL1 [c] {s} (ADDLconst [d] x) y) 8257 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB 8258 // result: (LEAL1 [c+d] {s} x y) 8259 for { 8260 c := auxIntToInt32(v.AuxInt) 8261 s := auxToSym(v.Aux) 8262 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8263 if v_0.Op != OpAMD64ADDLconst { 8264 continue 8265 } 8266 d := auxIntToInt32(v_0.AuxInt) 8267 x := v_0.Args[0] 8268 y := v_1 8269 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { 8270 continue 8271 } 8272 v.reset(OpAMD64LEAL1) 8273 v.AuxInt = int32ToAuxInt(c + d) 8274 v.Aux = symToAux(s) 8275 v.AddArg2(x, y) 8276 return true 8277 } 8278 break 8279 } 8280 // match: (LEAL1 [c] {s} x (SHLLconst [1] y)) 8281 // result: (LEAL2 [c] {s} x y) 8282 for { 8283 c := auxIntToInt32(v.AuxInt) 8284 s := auxToSym(v.Aux) 8285 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8286 x := v_0 8287 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { 8288 continue 8289 } 8290 y := v_1.Args[0] 8291 v.reset(OpAMD64LEAL2) 8292 v.AuxInt = int32ToAuxInt(c) 8293 v.Aux = symToAux(s) 8294 v.AddArg2(x, y) 8295 return true 8296 } 8297 break 8298 } 8299 // match: (LEAL1 [c] {s} x (SHLLconst [2] y)) 8300 // result: (LEAL4 [c] {s} x y) 8301 for { 8302 c := auxIntToInt32(v.AuxInt) 8303 s := auxToSym(v.Aux) 8304 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8305 x := v_0 8306 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 { 8307 continue 8308 } 8309 y := v_1.Args[0] 8310 v.reset(OpAMD64LEAL4) 8311 v.AuxInt = int32ToAuxInt(c) 8312 v.Aux = symToAux(s) 8313 v.AddArg2(x, y) 8314 return true 8315 } 8316 break 8317 } 8318 // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) 8319 // result: (LEAL8 [c] {s} x y) 8320 for { 8321 c := auxIntToInt32(v.AuxInt) 8322 s := auxToSym(v.Aux) 8323 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8324 x := v_0 8325 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 { 8326 continue 8327 } 8328 y := v_1.Args[0] 8329 v.reset(OpAMD64LEAL8) 8330 v.AuxInt = int32ToAuxInt(c) 8331 v.Aux = symToAux(s) 8332 v.AddArg2(x, y) 8333 return true 8334 } 8335 break 8336 } 8337 return false 8338 } 8339 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { 8340 v_1 := v.Args[1] 8341 v_0 := v.Args[0] 8342 // match: (LEAL2 [c] {s} (ADDLconst [d] x) y) 8343 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB 
8344 // result: (LEAL2 [c+d] {s} x y) 8345 for { 8346 c := auxIntToInt32(v.AuxInt) 8347 s := auxToSym(v.Aux) 8348 if v_0.Op != OpAMD64ADDLconst { 8349 break 8350 } 8351 d := auxIntToInt32(v_0.AuxInt) 8352 x := v_0.Args[0] 8353 y := v_1 8354 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { 8355 break 8356 } 8357 v.reset(OpAMD64LEAL2) 8358 v.AuxInt = int32ToAuxInt(c + d) 8359 v.Aux = symToAux(s) 8360 v.AddArg2(x, y) 8361 return true 8362 } 8363 // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) 8364 // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB 8365 // result: (LEAL2 [c+2*d] {s} x y) 8366 for { 8367 c := auxIntToInt32(v.AuxInt) 8368 s := auxToSym(v.Aux) 8369 x := v_0 8370 if v_1.Op != OpAMD64ADDLconst { 8371 break 8372 } 8373 d := auxIntToInt32(v_1.AuxInt) 8374 y := v_1.Args[0] 8375 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) { 8376 break 8377 } 8378 v.reset(OpAMD64LEAL2) 8379 v.AuxInt = int32ToAuxInt(c + 2*d) 8380 v.Aux = symToAux(s) 8381 v.AddArg2(x, y) 8382 return true 8383 } 8384 // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) 8385 // result: (LEAL4 [c] {s} x y) 8386 for { 8387 c := auxIntToInt32(v.AuxInt) 8388 s := auxToSym(v.Aux) 8389 x := v_0 8390 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { 8391 break 8392 } 8393 y := v_1.Args[0] 8394 v.reset(OpAMD64LEAL4) 8395 v.AuxInt = int32ToAuxInt(c) 8396 v.Aux = symToAux(s) 8397 v.AddArg2(x, y) 8398 return true 8399 } 8400 // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) 8401 // result: (LEAL8 [c] {s} x y) 8402 for { 8403 c := auxIntToInt32(v.AuxInt) 8404 s := auxToSym(v.Aux) 8405 x := v_0 8406 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 { 8407 break 8408 } 8409 y := v_1.Args[0] 8410 v.reset(OpAMD64LEAL8) 8411 v.AuxInt = int32ToAuxInt(c) 8412 v.Aux = symToAux(s) 8413 v.AddArg2(x, y) 8414 return true 8415 } 8416 return false 8417 } 8418 func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { 8419 v_1 := v.Args[1] 8420 v_0 := v.Args[0] 8421 // match: (LEAL4 [c] {s} (ADDLconst [d] x) y) 8422 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB 8423 // result: (LEAL4 [c+d] {s} x y) 8424 for { 8425 c := auxIntToInt32(v.AuxInt) 8426 s := auxToSym(v.Aux) 8427 if v_0.Op != OpAMD64ADDLconst { 8428 break 8429 } 8430 d := auxIntToInt32(v_0.AuxInt) 8431 x := v_0.Args[0] 8432 y := v_1 8433 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { 8434 break 8435 } 8436 v.reset(OpAMD64LEAL4) 8437 v.AuxInt = int32ToAuxInt(c + d) 8438 v.Aux = symToAux(s) 8439 v.AddArg2(x, y) 8440 return true 8441 } 8442 // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) 8443 // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB 8444 // result: (LEAL4 [c+4*d] {s} x y) 8445 for { 8446 c := auxIntToInt32(v.AuxInt) 8447 s := auxToSym(v.Aux) 8448 x := v_0 8449 if v_1.Op != OpAMD64ADDLconst { 8450 break 8451 } 8452 d := auxIntToInt32(v_1.AuxInt) 8453 y := v_1.Args[0] 8454 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) { 8455 break 8456 } 8457 v.reset(OpAMD64LEAL4) 8458 v.AuxInt = int32ToAuxInt(c + 4*d) 8459 v.Aux = symToAux(s) 8460 v.AddArg2(x, y) 8461 return true 8462 } 8463 // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) 8464 // result: (LEAL8 [c] {s} x y) 8465 for { 8466 c := auxIntToInt32(v.AuxInt) 8467 s := auxToSym(v.Aux) 8468 x := v_0 8469 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { 8470 break 8471 } 8472 y := v_1.Args[0] 8473 v.reset(OpAMD64LEAL8) 8474 v.AuxInt = int32ToAuxInt(c) 8475 v.Aux = symToAux(s) 8476 v.AddArg2(x, y) 8477 return true 8478 } 8479 return false 8480 } 8481 func rewriteValueAMD64_OpAMD64LEAL8(v *Value) 
bool { 8482 v_1 := v.Args[1] 8483 v_0 := v.Args[0] 8484 // match: (LEAL8 [c] {s} (ADDLconst [d] x) y) 8485 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB 8486 // result: (LEAL8 [c+d] {s} x y) 8487 for { 8488 c := auxIntToInt32(v.AuxInt) 8489 s := auxToSym(v.Aux) 8490 if v_0.Op != OpAMD64ADDLconst { 8491 break 8492 } 8493 d := auxIntToInt32(v_0.AuxInt) 8494 x := v_0.Args[0] 8495 y := v_1 8496 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { 8497 break 8498 } 8499 v.reset(OpAMD64LEAL8) 8500 v.AuxInt = int32ToAuxInt(c + d) 8501 v.Aux = symToAux(s) 8502 v.AddArg2(x, y) 8503 return true 8504 } 8505 // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) 8506 // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB 8507 // result: (LEAL8 [c+8*d] {s} x y) 8508 for { 8509 c := auxIntToInt32(v.AuxInt) 8510 s := auxToSym(v.Aux) 8511 x := v_0 8512 if v_1.Op != OpAMD64ADDLconst { 8513 break 8514 } 8515 d := auxIntToInt32(v_1.AuxInt) 8516 y := v_1.Args[0] 8517 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) { 8518 break 8519 } 8520 v.reset(OpAMD64LEAL8) 8521 v.AuxInt = int32ToAuxInt(c + 8*d) 8522 v.Aux = symToAux(s) 8523 v.AddArg2(x, y) 8524 return true 8525 } 8526 return false 8527 } 8528 func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { 8529 v_0 := v.Args[0] 8530 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 8531 // cond: is32Bit(int64(c)+int64(d)) 8532 // result: (LEAQ [c+d] {s} x) 8533 for { 8534 c := auxIntToInt32(v.AuxInt) 8535 s := auxToSym(v.Aux) 8536 if v_0.Op != OpAMD64ADDQconst { 8537 break 8538 } 8539 d := auxIntToInt32(v_0.AuxInt) 8540 x := v_0.Args[0] 8541 if !(is32Bit(int64(c) + int64(d))) { 8542 break 8543 } 8544 v.reset(OpAMD64LEAQ) 8545 v.AuxInt = int32ToAuxInt(c + d) 8546 v.Aux = symToAux(s) 8547 v.AddArg(x) 8548 return true 8549 } 8550 // match: (LEAQ [c] {s} (ADDQ x y)) 8551 // cond: x.Op != OpSB && y.Op != OpSB 8552 // result: (LEAQ1 [c] {s} x y) 8553 for { 8554 c := auxIntToInt32(v.AuxInt) 8555 s := auxToSym(v.Aux) 8556 if v_0.Op != OpAMD64ADDQ { 8557 break 8558 } 8559 _ = v_0.Args[1] 8560 v_0_0 := v_0.Args[0] 8561 v_0_1 := v_0.Args[1] 8562 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 8563 x := v_0_0 8564 y := v_0_1 8565 if !(x.Op != OpSB && y.Op != OpSB) { 8566 continue 8567 } 8568 v.reset(OpAMD64LEAQ1) 8569 v.AuxInt = int32ToAuxInt(c) 8570 v.Aux = symToAux(s) 8571 v.AddArg2(x, y) 8572 return true 8573 } 8574 break 8575 } 8576 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 8577 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8578 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 8579 for { 8580 off1 := auxIntToInt32(v.AuxInt) 8581 sym1 := auxToSym(v.Aux) 8582 if v_0.Op != OpAMD64LEAQ { 8583 break 8584 } 8585 off2 := auxIntToInt32(v_0.AuxInt) 8586 sym2 := auxToSym(v_0.Aux) 8587 x := v_0.Args[0] 8588 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 8589 break 8590 } 8591 v.reset(OpAMD64LEAQ) 8592 v.AuxInt = int32ToAuxInt(off1 + off2) 8593 v.Aux = symToAux(mergeSym(sym1, sym2)) 8594 v.AddArg(x) 8595 return true 8596 } 8597 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 8598 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8599 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 8600 for { 8601 off1 := auxIntToInt32(v.AuxInt) 8602 sym1 := auxToSym(v.Aux) 8603 if v_0.Op != OpAMD64LEAQ1 { 8604 break 8605 } 8606 off2 := auxIntToInt32(v_0.AuxInt) 8607 sym2 := auxToSym(v_0.Aux) 8608 y := v_0.Args[1] 8609 x := v_0.Args[0] 8610 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, 
sym2)) { 8611 break 8612 } 8613 v.reset(OpAMD64LEAQ1) 8614 v.AuxInt = int32ToAuxInt(off1 + off2) 8615 v.Aux = symToAux(mergeSym(sym1, sym2)) 8616 v.AddArg2(x, y) 8617 return true 8618 } 8619 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 8620 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8621 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 8622 for { 8623 off1 := auxIntToInt32(v.AuxInt) 8624 sym1 := auxToSym(v.Aux) 8625 if v_0.Op != OpAMD64LEAQ2 { 8626 break 8627 } 8628 off2 := auxIntToInt32(v_0.AuxInt) 8629 sym2 := auxToSym(v_0.Aux) 8630 y := v_0.Args[1] 8631 x := v_0.Args[0] 8632 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 8633 break 8634 } 8635 v.reset(OpAMD64LEAQ2) 8636 v.AuxInt = int32ToAuxInt(off1 + off2) 8637 v.Aux = symToAux(mergeSym(sym1, sym2)) 8638 v.AddArg2(x, y) 8639 return true 8640 } 8641 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 8642 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8643 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 8644 for { 8645 off1 := auxIntToInt32(v.AuxInt) 8646 sym1 := auxToSym(v.Aux) 8647 if v_0.Op != OpAMD64LEAQ4 { 8648 break 8649 } 8650 off2 := auxIntToInt32(v_0.AuxInt) 8651 sym2 := auxToSym(v_0.Aux) 8652 y := v_0.Args[1] 8653 x := v_0.Args[0] 8654 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 8655 break 8656 } 8657 v.reset(OpAMD64LEAQ4) 8658 v.AuxInt = int32ToAuxInt(off1 + off2) 8659 v.Aux = symToAux(mergeSym(sym1, sym2)) 8660 v.AddArg2(x, y) 8661 return true 8662 } 8663 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 8664 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8665 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 8666 for { 8667 off1 := auxIntToInt32(v.AuxInt) 8668 sym1 := auxToSym(v.Aux) 8669 if v_0.Op != OpAMD64LEAQ8 { 8670 break 8671 } 8672 off2 := auxIntToInt32(v_0.AuxInt) 8673 sym2 := auxToSym(v_0.Aux) 8674 y := v_0.Args[1] 8675 x := v_0.Args[0] 8676 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 8677 break 8678 } 8679 v.reset(OpAMD64LEAQ8) 8680 v.AuxInt = int32ToAuxInt(off1 + off2) 8681 v.Aux = symToAux(mergeSym(sym1, sym2)) 8682 v.AddArg2(x, y) 8683 return true 8684 } 8685 return false 8686 } 8687 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { 8688 v_1 := v.Args[1] 8689 v_0 := v.Args[0] 8690 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 8691 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB 8692 // result: (LEAQ1 [c+d] {s} x y) 8693 for { 8694 c := auxIntToInt32(v.AuxInt) 8695 s := auxToSym(v.Aux) 8696 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8697 if v_0.Op != OpAMD64ADDQconst { 8698 continue 8699 } 8700 d := auxIntToInt32(v_0.AuxInt) 8701 x := v_0.Args[0] 8702 y := v_1 8703 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { 8704 continue 8705 } 8706 v.reset(OpAMD64LEAQ1) 8707 v.AuxInt = int32ToAuxInt(c + d) 8708 v.Aux = symToAux(s) 8709 v.AddArg2(x, y) 8710 return true 8711 } 8712 break 8713 } 8714 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 8715 // result: (LEAQ2 [c] {s} x y) 8716 for { 8717 c := auxIntToInt32(v.AuxInt) 8718 s := auxToSym(v.Aux) 8719 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8720 x := v_0 8721 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { 8722 continue 8723 } 8724 y := v_1.Args[0] 8725 v.reset(OpAMD64LEAQ2) 8726 v.AuxInt = int32ToAuxInt(c) 8727 v.Aux = symToAux(s) 8728 v.AddArg2(x, y) 8729 return true 8730 } 8731 break 8732 } 8733 // match: (LEAQ1 [c] {s} x 
(SHLQconst [2] y)) 8734 // result: (LEAQ4 [c] {s} x y) 8735 for { 8736 c := auxIntToInt32(v.AuxInt) 8737 s := auxToSym(v.Aux) 8738 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8739 x := v_0 8740 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 { 8741 continue 8742 } 8743 y := v_1.Args[0] 8744 v.reset(OpAMD64LEAQ4) 8745 v.AuxInt = int32ToAuxInt(c) 8746 v.Aux = symToAux(s) 8747 v.AddArg2(x, y) 8748 return true 8749 } 8750 break 8751 } 8752 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 8753 // result: (LEAQ8 [c] {s} x y) 8754 for { 8755 c := auxIntToInt32(v.AuxInt) 8756 s := auxToSym(v.Aux) 8757 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8758 x := v_0 8759 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 { 8760 continue 8761 } 8762 y := v_1.Args[0] 8763 v.reset(OpAMD64LEAQ8) 8764 v.AuxInt = int32ToAuxInt(c) 8765 v.Aux = symToAux(s) 8766 v.AddArg2(x, y) 8767 return true 8768 } 8769 break 8770 } 8771 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 8772 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB 8773 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 8774 for { 8775 off1 := auxIntToInt32(v.AuxInt) 8776 sym1 := auxToSym(v.Aux) 8777 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8778 if v_0.Op != OpAMD64LEAQ { 8779 continue 8780 } 8781 off2 := auxIntToInt32(v_0.AuxInt) 8782 sym2 := auxToSym(v_0.Aux) 8783 x := v_0.Args[0] 8784 y := v_1 8785 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 8786 continue 8787 } 8788 v.reset(OpAMD64LEAQ1) 8789 v.AuxInt = int32ToAuxInt(off1 + off2) 8790 v.Aux = symToAux(mergeSym(sym1, sym2)) 8791 v.AddArg2(x, y) 8792 return true 8793 } 8794 break 8795 } 8796 // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) 8797 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8798 // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y) 8799 for { 8800 off1 := auxIntToInt32(v.AuxInt) 8801 sym1 := auxToSym(v.Aux) 8802 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8803 x := v_0 8804 if v_1.Op != OpAMD64LEAQ1 { 8805 continue 8806 } 8807 off2 := auxIntToInt32(v_1.AuxInt) 8808 sym2 := auxToSym(v_1.Aux) 8809 y := v_1.Args[1] 8810 if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 8811 continue 8812 } 8813 v.reset(OpAMD64LEAQ2) 8814 v.AuxInt = int32ToAuxInt(off1 + off2) 8815 v.Aux = symToAux(mergeSym(sym1, sym2)) 8816 v.AddArg2(x, y) 8817 return true 8818 } 8819 break 8820 } 8821 // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) 8822 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 8823 // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x) 8824 for { 8825 off1 := auxIntToInt32(v.AuxInt) 8826 sym1 := auxToSym(v.Aux) 8827 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 8828 x := v_0 8829 if v_1.Op != OpAMD64LEAQ1 { 8830 continue 8831 } 8832 off2 := auxIntToInt32(v_1.AuxInt) 8833 sym2 := auxToSym(v_1.Aux) 8834 _ = v_1.Args[1] 8835 v_1_0 := v_1.Args[0] 8836 v_1_1 := v_1.Args[1] 8837 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { 8838 if x != v_1_0 { 8839 continue 8840 } 8841 y := v_1_1 8842 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 8843 continue 8844 } 8845 v.reset(OpAMD64LEAQ2) 8846 v.AuxInt = int32ToAuxInt(off1 + off2) 8847 v.Aux = symToAux(mergeSym(sym1, sym2)) 8848 v.AddArg2(y, x) 8849 return true 8850 } 8851 } 8852 break 8853 } 8854 // match: (LEAQ1 [0] x y) 8855 // 
cond: v.Aux == nil 8856 // result: (ADDQ x y) 8857 for { 8858 if auxIntToInt32(v.AuxInt) != 0 { 8859 break 8860 } 8861 x := v_0 8862 y := v_1 8863 if !(v.Aux == nil) { 8864 break 8865 } 8866 v.reset(OpAMD64ADDQ) 8867 v.AddArg2(x, y) 8868 return true 8869 } 8870 return false 8871 } 8872 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { 8873 v_1 := v.Args[1] 8874 v_0 := v.Args[0] 8875 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 8876 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB 8877 // result: (LEAQ2 [c+d] {s} x y) 8878 for { 8879 c := auxIntToInt32(v.AuxInt) 8880 s := auxToSym(v.Aux) 8881 if v_0.Op != OpAMD64ADDQconst { 8882 break 8883 } 8884 d := auxIntToInt32(v_0.AuxInt) 8885 x := v_0.Args[0] 8886 y := v_1 8887 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { 8888 break 8889 } 8890 v.reset(OpAMD64LEAQ2) 8891 v.AuxInt = int32ToAuxInt(c + d) 8892 v.Aux = symToAux(s) 8893 v.AddArg2(x, y) 8894 return true 8895 } 8896 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 8897 // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB 8898 // result: (LEAQ2 [c+2*d] {s} x y) 8899 for { 8900 c := auxIntToInt32(v.AuxInt) 8901 s := auxToSym(v.Aux) 8902 x := v_0 8903 if v_1.Op != OpAMD64ADDQconst { 8904 break 8905 } 8906 d := auxIntToInt32(v_1.AuxInt) 8907 y := v_1.Args[0] 8908 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) { 8909 break 8910 } 8911 v.reset(OpAMD64LEAQ2) 8912 v.AuxInt = int32ToAuxInt(c + 2*d) 8913 v.Aux = symToAux(s) 8914 v.AddArg2(x, y) 8915 return true 8916 } 8917 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 8918 // result: (LEAQ4 [c] {s} x y) 8919 for { 8920 c := auxIntToInt32(v.AuxInt) 8921 s := auxToSym(v.Aux) 8922 x := v_0 8923 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { 8924 break 8925 } 8926 y := v_1.Args[0] 8927 v.reset(OpAMD64LEAQ4) 8928 v.AuxInt = int32ToAuxInt(c) 8929 v.Aux = symToAux(s) 8930 v.AddArg2(x, y) 8931 return true 8932 } 8933 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 8934 // result: (LEAQ8 [c] {s} x y) 8935 for { 8936 c := auxIntToInt32(v.AuxInt) 8937 s := auxToSym(v.Aux) 8938 x := v_0 8939 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 { 8940 break 8941 } 8942 y := v_1.Args[0] 8943 v.reset(OpAMD64LEAQ8) 8944 v.AuxInt = int32ToAuxInt(c) 8945 v.Aux = symToAux(s) 8946 v.AddArg2(x, y) 8947 return true 8948 } 8949 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 8950 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB 8951 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 8952 for { 8953 off1 := auxIntToInt32(v.AuxInt) 8954 sym1 := auxToSym(v.Aux) 8955 if v_0.Op != OpAMD64LEAQ { 8956 break 8957 } 8958 off2 := auxIntToInt32(v_0.AuxInt) 8959 sym2 := auxToSym(v_0.Aux) 8960 x := v_0.Args[0] 8961 y := v_1 8962 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 8963 break 8964 } 8965 v.reset(OpAMD64LEAQ2) 8966 v.AuxInt = int32ToAuxInt(off1 + off2) 8967 v.Aux = symToAux(mergeSym(sym1, sym2)) 8968 v.AddArg2(x, y) 8969 return true 8970 } 8971 // match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) 8972 // cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil 8973 // result: (LEAQ4 [off1+2*off2] {sym1} x y) 8974 for { 8975 off1 := auxIntToInt32(v.AuxInt) 8976 sym1 := auxToSym(v.Aux) 8977 x := v_0 8978 if v_1.Op != OpAMD64LEAQ1 { 8979 break 8980 } 8981 off2 := auxIntToInt32(v_1.AuxInt) 8982 sym2 := auxToSym(v_1.Aux) 8983 y := v_1.Args[1] 8984 if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) { 8985 break 8986 } 8987 
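// The rewrite below is plain address arithmetic: the inner (LEAQ1 [off2] y y)
// computes y+y+off2, and scaling that by 2 under LEAQ2 gives
//
//	x + 2*((y + y) + off2) + off1 = x + 4*y + (off1 + 2*off2)
//
// which is exactly LEAQ4 [off1+2*off2] x y. The sym2 == nil condition is
// needed because a relocated symbol offset cannot be multiplied by the
// scale, and LEAQ1/2/4/8 correspond to the 1/2/4/8 scale factors of the
// x86 SIB addressing byte.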
v.reset(OpAMD64LEAQ4) 8988 v.AuxInt = int32ToAuxInt(off1 + 2*off2) 8989 v.Aux = symToAux(sym1) 8990 v.AddArg2(x, y) 8991 return true 8992 } 8993 // match: (LEAQ2 [off] {sym} x (MOVQconst [scale])) 8994 // cond: is32Bit(int64(off)+int64(scale)*2) 8995 // result: (LEAQ [off+int32(scale)*2] {sym} x) 8996 for { 8997 off := auxIntToInt32(v.AuxInt) 8998 sym := auxToSym(v.Aux) 8999 x := v_0 9000 if v_1.Op != OpAMD64MOVQconst { 9001 break 9002 } 9003 scale := auxIntToInt64(v_1.AuxInt) 9004 if !(is32Bit(int64(off) + int64(scale)*2)) { 9005 break 9006 } 9007 v.reset(OpAMD64LEAQ) 9008 v.AuxInt = int32ToAuxInt(off + int32(scale)*2) 9009 v.Aux = symToAux(sym) 9010 v.AddArg(x) 9011 return true 9012 } 9013 // match: (LEAQ2 [off] {sym} x (MOVLconst [scale])) 9014 // cond: is32Bit(int64(off)+int64(scale)*2) 9015 // result: (LEAQ [off+int32(scale)*2] {sym} x) 9016 for { 9017 off := auxIntToInt32(v.AuxInt) 9018 sym := auxToSym(v.Aux) 9019 x := v_0 9020 if v_1.Op != OpAMD64MOVLconst { 9021 break 9022 } 9023 scale := auxIntToInt32(v_1.AuxInt) 9024 if !(is32Bit(int64(off) + int64(scale)*2)) { 9025 break 9026 } 9027 v.reset(OpAMD64LEAQ) 9028 v.AuxInt = int32ToAuxInt(off + int32(scale)*2) 9029 v.Aux = symToAux(sym) 9030 v.AddArg(x) 9031 return true 9032 } 9033 return false 9034 } 9035 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { 9036 v_1 := v.Args[1] 9037 v_0 := v.Args[0] 9038 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 9039 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB 9040 // result: (LEAQ4 [c+d] {s} x y) 9041 for { 9042 c := auxIntToInt32(v.AuxInt) 9043 s := auxToSym(v.Aux) 9044 if v_0.Op != OpAMD64ADDQconst { 9045 break 9046 } 9047 d := auxIntToInt32(v_0.AuxInt) 9048 x := v_0.Args[0] 9049 y := v_1 9050 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { 9051 break 9052 } 9053 v.reset(OpAMD64LEAQ4) 9054 v.AuxInt = int32ToAuxInt(c + d) 9055 v.Aux = symToAux(s) 9056 v.AddArg2(x, y) 9057 return true 9058 } 9059 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 9060 // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB 9061 // result: (LEAQ4 [c+4*d] {s} x y) 9062 for { 9063 c := auxIntToInt32(v.AuxInt) 9064 s := auxToSym(v.Aux) 9065 x := v_0 9066 if v_1.Op != OpAMD64ADDQconst { 9067 break 9068 } 9069 d := auxIntToInt32(v_1.AuxInt) 9070 y := v_1.Args[0] 9071 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) { 9072 break 9073 } 9074 v.reset(OpAMD64LEAQ4) 9075 v.AuxInt = int32ToAuxInt(c + 4*d) 9076 v.Aux = symToAux(s) 9077 v.AddArg2(x, y) 9078 return true 9079 } 9080 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 9081 // result: (LEAQ8 [c] {s} x y) 9082 for { 9083 c := auxIntToInt32(v.AuxInt) 9084 s := auxToSym(v.Aux) 9085 x := v_0 9086 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { 9087 break 9088 } 9089 y := v_1.Args[0] 9090 v.reset(OpAMD64LEAQ8) 9091 v.AuxInt = int32ToAuxInt(c) 9092 v.Aux = symToAux(s) 9093 v.AddArg2(x, y) 9094 return true 9095 } 9096 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 9097 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB 9098 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 9099 for { 9100 off1 := auxIntToInt32(v.AuxInt) 9101 sym1 := auxToSym(v.Aux) 9102 if v_0.Op != OpAMD64LEAQ { 9103 break 9104 } 9105 off2 := auxIntToInt32(v_0.AuxInt) 9106 sym2 := auxToSym(v_0.Aux) 9107 x := v_0.Args[0] 9108 y := v_1 9109 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 9110 break 9111 } 9112 v.reset(OpAMD64LEAQ4) 9113 v.AuxInt = int32ToAuxInt(off1 + off2) 9114 v.Aux = 
symToAux(mergeSym(sym1, sym2)) 9115 v.AddArg2(x, y) 9116 return true 9117 } 9118 // match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) 9119 // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil 9120 // result: (LEAQ8 [off1+4*off2] {sym1} x y) 9121 for { 9122 off1 := auxIntToInt32(v.AuxInt) 9123 sym1 := auxToSym(v.Aux) 9124 x := v_0 9125 if v_1.Op != OpAMD64LEAQ1 { 9126 break 9127 } 9128 off2 := auxIntToInt32(v_1.AuxInt) 9129 sym2 := auxToSym(v_1.Aux) 9130 y := v_1.Args[1] 9131 if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) { 9132 break 9133 } 9134 v.reset(OpAMD64LEAQ8) 9135 v.AuxInt = int32ToAuxInt(off1 + 4*off2) 9136 v.Aux = symToAux(sym1) 9137 v.AddArg2(x, y) 9138 return true 9139 } 9140 // match: (LEAQ4 [off] {sym} x (MOVQconst [scale])) 9141 // cond: is32Bit(int64(off)+int64(scale)*4) 9142 // result: (LEAQ [off+int32(scale)*4] {sym} x) 9143 for { 9144 off := auxIntToInt32(v.AuxInt) 9145 sym := auxToSym(v.Aux) 9146 x := v_0 9147 if v_1.Op != OpAMD64MOVQconst { 9148 break 9149 } 9150 scale := auxIntToInt64(v_1.AuxInt) 9151 if !(is32Bit(int64(off) + int64(scale)*4)) { 9152 break 9153 } 9154 v.reset(OpAMD64LEAQ) 9155 v.AuxInt = int32ToAuxInt(off + int32(scale)*4) 9156 v.Aux = symToAux(sym) 9157 v.AddArg(x) 9158 return true 9159 } 9160 // match: (LEAQ4 [off] {sym} x (MOVLconst [scale])) 9161 // cond: is32Bit(int64(off)+int64(scale)*4) 9162 // result: (LEAQ [off+int32(scale)*4] {sym} x) 9163 for { 9164 off := auxIntToInt32(v.AuxInt) 9165 sym := auxToSym(v.Aux) 9166 x := v_0 9167 if v_1.Op != OpAMD64MOVLconst { 9168 break 9169 } 9170 scale := auxIntToInt32(v_1.AuxInt) 9171 if !(is32Bit(int64(off) + int64(scale)*4)) { 9172 break 9173 } 9174 v.reset(OpAMD64LEAQ) 9175 v.AuxInt = int32ToAuxInt(off + int32(scale)*4) 9176 v.Aux = symToAux(sym) 9177 v.AddArg(x) 9178 return true 9179 } 9180 return false 9181 } 9182 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { 9183 v_1 := v.Args[1] 9184 v_0 := v.Args[0] 9185 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 9186 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB 9187 // result: (LEAQ8 [c+d] {s} x y) 9188 for { 9189 c := auxIntToInt32(v.AuxInt) 9190 s := auxToSym(v.Aux) 9191 if v_0.Op != OpAMD64ADDQconst { 9192 break 9193 } 9194 d := auxIntToInt32(v_0.AuxInt) 9195 x := v_0.Args[0] 9196 y := v_1 9197 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { 9198 break 9199 } 9200 v.reset(OpAMD64LEAQ8) 9201 v.AuxInt = int32ToAuxInt(c + d) 9202 v.Aux = symToAux(s) 9203 v.AddArg2(x, y) 9204 return true 9205 } 9206 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 9207 // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB 9208 // result: (LEAQ8 [c+8*d] {s} x y) 9209 for { 9210 c := auxIntToInt32(v.AuxInt) 9211 s := auxToSym(v.Aux) 9212 x := v_0 9213 if v_1.Op != OpAMD64ADDQconst { 9214 break 9215 } 9216 d := auxIntToInt32(v_1.AuxInt) 9217 y := v_1.Args[0] 9218 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) { 9219 break 9220 } 9221 v.reset(OpAMD64LEAQ8) 9222 v.AuxInt = int32ToAuxInt(c + 8*d) 9223 v.Aux = symToAux(s) 9224 v.AddArg2(x, y) 9225 return true 9226 } 9227 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 9228 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB 9229 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 9230 for { 9231 off1 := auxIntToInt32(v.AuxInt) 9232 sym1 := auxToSym(v.Aux) 9233 if v_0.Op != OpAMD64LEAQ { 9234 break 9235 } 9236 off2 := auxIntToInt32(v_0.AuxInt) 9237 sym2 := auxToSym(v_0.Aux) 9238 x := v_0.Args[0] 9239 y := v_1 9240 if 
!(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 9241 break 9242 } 9243 v.reset(OpAMD64LEAQ8) 9244 v.AuxInt = int32ToAuxInt(off1 + off2) 9245 v.Aux = symToAux(mergeSym(sym1, sym2)) 9246 v.AddArg2(x, y) 9247 return true 9248 } 9249 // match: (LEAQ8 [off] {sym} x (MOVQconst [scale])) 9250 // cond: is32Bit(int64(off)+int64(scale)*8) 9251 // result: (LEAQ [off+int32(scale)*8] {sym} x) 9252 for { 9253 off := auxIntToInt32(v.AuxInt) 9254 sym := auxToSym(v.Aux) 9255 x := v_0 9256 if v_1.Op != OpAMD64MOVQconst { 9257 break 9258 } 9259 scale := auxIntToInt64(v_1.AuxInt) 9260 if !(is32Bit(int64(off) + int64(scale)*8)) { 9261 break 9262 } 9263 v.reset(OpAMD64LEAQ) 9264 v.AuxInt = int32ToAuxInt(off + int32(scale)*8) 9265 v.Aux = symToAux(sym) 9266 v.AddArg(x) 9267 return true 9268 } 9269 // match: (LEAQ8 [off] {sym} x (MOVLconst [scale])) 9270 // cond: is32Bit(int64(off)+int64(scale)*8) 9271 // result: (LEAQ [off+int32(scale)*8] {sym} x) 9272 for { 9273 off := auxIntToInt32(v.AuxInt) 9274 sym := auxToSym(v.Aux) 9275 x := v_0 9276 if v_1.Op != OpAMD64MOVLconst { 9277 break 9278 } 9279 scale := auxIntToInt32(v_1.AuxInt) 9280 if !(is32Bit(int64(off) + int64(scale)*8)) { 9281 break 9282 } 9283 v.reset(OpAMD64LEAQ) 9284 v.AuxInt = int32ToAuxInt(off + int32(scale)*8) 9285 v.Aux = symToAux(sym) 9286 v.AddArg(x) 9287 return true 9288 } 9289 return false 9290 } 9291 func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool { 9292 v_2 := v.Args[2] 9293 v_1 := v.Args[1] 9294 v_0 := v.Args[0] 9295 // match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem) 9296 // cond: x.Uses == 1 9297 // result: (MOVLstore [i] {s} p w mem) 9298 for { 9299 i := auxIntToInt32(v.AuxInt) 9300 s := auxToSym(v.Aux) 9301 p := v_0 9302 x := v_1 9303 if x.Op != OpAMD64BSWAPL { 9304 break 9305 } 9306 w := x.Args[0] 9307 mem := v_2 9308 if !(x.Uses == 1) { 9309 break 9310 } 9311 v.reset(OpAMD64MOVLstore) 9312 v.AuxInt = int32ToAuxInt(i) 9313 v.Aux = symToAux(s) 9314 v.AddArg3(p, w, mem) 9315 return true 9316 } 9317 return false 9318 } 9319 func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool { 9320 v_2 := v.Args[2] 9321 v_1 := v.Args[1] 9322 v_0 := v.Args[0] 9323 // match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem) 9324 // cond: x.Uses == 1 9325 // result: (MOVQstore [i] {s} p w mem) 9326 for { 9327 i := auxIntToInt32(v.AuxInt) 9328 s := auxToSym(v.Aux) 9329 p := v_0 9330 x := v_1 9331 if x.Op != OpAMD64BSWAPQ { 9332 break 9333 } 9334 w := x.Args[0] 9335 mem := v_2 9336 if !(x.Uses == 1) { 9337 break 9338 } 9339 v.reset(OpAMD64MOVQstore) 9340 v.AuxInt = int32ToAuxInt(i) 9341 v.Aux = symToAux(s) 9342 v.AddArg3(p, w, mem) 9343 return true 9344 } 9345 return false 9346 } 9347 func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool { 9348 v_2 := v.Args[2] 9349 v_1 := v.Args[1] 9350 v_0 := v.Args[0] 9351 // match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem) 9352 // cond: x.Uses == 1 9353 // result: (MOVWstore [i] {s} p w mem) 9354 for { 9355 i := auxIntToInt32(v.AuxInt) 9356 s := auxToSym(v.Aux) 9357 p := v_0 9358 x := v_1 9359 if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 { 9360 break 9361 } 9362 w := x.Args[0] 9363 mem := v_2 9364 if !(x.Uses == 1) { 9365 break 9366 } 9367 v.reset(OpAMD64MOVWstore) 9368 v.AuxInt = int32ToAuxInt(i) 9369 v.Aux = symToAux(s) 9370 v.AddArg3(p, w, mem) 9371 return true 9372 } 9373 return false 9374 } 9375 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { 9376 v_0 := v.Args[0] 9377 b := v.Block 9378 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 9379 // cond: 
x.Uses == 1 && clobber(x) 9380 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 9381 for { 9382 x := v_0 9383 if x.Op != OpAMD64MOVBload { 9384 break 9385 } 9386 off := auxIntToInt32(x.AuxInt) 9387 sym := auxToSym(x.Aux) 9388 mem := x.Args[1] 9389 ptr := x.Args[0] 9390 if !(x.Uses == 1 && clobber(x)) { 9391 break 9392 } 9393 b = x.Block 9394 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) 9395 v.copyOf(v0) 9396 v0.AuxInt = int32ToAuxInt(off) 9397 v0.Aux = symToAux(sym) 9398 v0.AddArg2(ptr, mem) 9399 return true 9400 } 9401 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 9402 // cond: x.Uses == 1 && clobber(x) 9403 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 9404 for { 9405 x := v_0 9406 if x.Op != OpAMD64MOVWload { 9407 break 9408 } 9409 off := auxIntToInt32(x.AuxInt) 9410 sym := auxToSym(x.Aux) 9411 mem := x.Args[1] 9412 ptr := x.Args[0] 9413 if !(x.Uses == 1 && clobber(x)) { 9414 break 9415 } 9416 b = x.Block 9417 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) 9418 v.copyOf(v0) 9419 v0.AuxInt = int32ToAuxInt(off) 9420 v0.Aux = symToAux(sym) 9421 v0.AddArg2(ptr, mem) 9422 return true 9423 } 9424 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) 9425 // cond: x.Uses == 1 && clobber(x) 9426 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 9427 for { 9428 x := v_0 9429 if x.Op != OpAMD64MOVLload { 9430 break 9431 } 9432 off := auxIntToInt32(x.AuxInt) 9433 sym := auxToSym(x.Aux) 9434 mem := x.Args[1] 9435 ptr := x.Args[0] 9436 if !(x.Uses == 1 && clobber(x)) { 9437 break 9438 } 9439 b = x.Block 9440 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) 9441 v.copyOf(v0) 9442 v0.AuxInt = int32ToAuxInt(off) 9443 v0.Aux = symToAux(sym) 9444 v0.AddArg2(ptr, mem) 9445 return true 9446 } 9447 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 9448 // cond: x.Uses == 1 && clobber(x) 9449 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 9450 for { 9451 x := v_0 9452 if x.Op != OpAMD64MOVQload { 9453 break 9454 } 9455 off := auxIntToInt32(x.AuxInt) 9456 sym := auxToSym(x.Aux) 9457 mem := x.Args[1] 9458 ptr := x.Args[0] 9459 if !(x.Uses == 1 && clobber(x)) { 9460 break 9461 } 9462 b = x.Block 9463 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) 9464 v.copyOf(v0) 9465 v0.AuxInt = int32ToAuxInt(off) 9466 v0.Aux = symToAux(sym) 9467 v0.AddArg2(ptr, mem) 9468 return true 9469 } 9470 // match: (MOVBQSX (ANDLconst [c] x)) 9471 // cond: c & 0x80 == 0 9472 // result: (ANDLconst [c & 0x7f] x) 9473 for { 9474 if v_0.Op != OpAMD64ANDLconst { 9475 break 9476 } 9477 c := auxIntToInt32(v_0.AuxInt) 9478 x := v_0.Args[0] 9479 if !(c&0x80 == 0) { 9480 break 9481 } 9482 v.reset(OpAMD64ANDLconst) 9483 v.AuxInt = int32ToAuxInt(c & 0x7f) 9484 v.AddArg(x) 9485 return true 9486 } 9487 // match: (MOVBQSX (MOVBQSX x)) 9488 // result: (MOVBQSX x) 9489 for { 9490 if v_0.Op != OpAMD64MOVBQSX { 9491 break 9492 } 9493 x := v_0.Args[0] 9494 v.reset(OpAMD64MOVBQSX) 9495 v.AddArg(x) 9496 return true 9497 } 9498 return false 9499 } 9500 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool { 9501 v_1 := v.Args[1] 9502 v_0 := v.Args[0] 9503 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 9504 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 9505 // result: (MOVBQSX x) 9506 for { 9507 off := auxIntToInt32(v.AuxInt) 9508 sym := auxToSym(v.Aux) 9509 ptr := v_0 9510 if v_1.Op != OpAMD64MOVBstore { 9511 break 9512 } 9513 off2 := auxIntToInt32(v_1.AuxInt) 9514 sym2 := auxToSym(v_1.Aux) 9515 x := v_1.Args[1] 9516 ptr2 
:= v_1.Args[0] 9517 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 9518 break 9519 } 9520 v.reset(OpAMD64MOVBQSX) 9521 v.AddArg(x) 9522 return true 9523 } 9524 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9525 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 9526 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9527 for { 9528 off1 := auxIntToInt32(v.AuxInt) 9529 sym1 := auxToSym(v.Aux) 9530 if v_0.Op != OpAMD64LEAQ { 9531 break 9532 } 9533 off2 := auxIntToInt32(v_0.AuxInt) 9534 sym2 := auxToSym(v_0.Aux) 9535 base := v_0.Args[0] 9536 mem := v_1 9537 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 9538 break 9539 } 9540 v.reset(OpAMD64MOVBQSXload) 9541 v.AuxInt = int32ToAuxInt(off1 + off2) 9542 v.Aux = symToAux(mergeSym(sym1, sym2)) 9543 v.AddArg2(base, mem) 9544 return true 9545 } 9546 return false 9547 } 9548 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { 9549 v_0 := v.Args[0] 9550 b := v.Block 9551 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 9552 // cond: x.Uses == 1 && clobber(x) 9553 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 9554 for { 9555 x := v_0 9556 if x.Op != OpAMD64MOVBload { 9557 break 9558 } 9559 off := auxIntToInt32(x.AuxInt) 9560 sym := auxToSym(x.Aux) 9561 mem := x.Args[1] 9562 ptr := x.Args[0] 9563 if !(x.Uses == 1 && clobber(x)) { 9564 break 9565 } 9566 b = x.Block 9567 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) 9568 v.copyOf(v0) 9569 v0.AuxInt = int32ToAuxInt(off) 9570 v0.Aux = symToAux(sym) 9571 v0.AddArg2(ptr, mem) 9572 return true 9573 } 9574 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 9575 // cond: x.Uses == 1 && clobber(x) 9576 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 9577 for { 9578 x := v_0 9579 if x.Op != OpAMD64MOVWload { 9580 break 9581 } 9582 off := auxIntToInt32(x.AuxInt) 9583 sym := auxToSym(x.Aux) 9584 mem := x.Args[1] 9585 ptr := x.Args[0] 9586 if !(x.Uses == 1 && clobber(x)) { 9587 break 9588 } 9589 b = x.Block 9590 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) 9591 v.copyOf(v0) 9592 v0.AuxInt = int32ToAuxInt(off) 9593 v0.Aux = symToAux(sym) 9594 v0.AddArg2(ptr, mem) 9595 return true 9596 } 9597 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 9598 // cond: x.Uses == 1 && clobber(x) 9599 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 9600 for { 9601 x := v_0 9602 if x.Op != OpAMD64MOVLload { 9603 break 9604 } 9605 off := auxIntToInt32(x.AuxInt) 9606 sym := auxToSym(x.Aux) 9607 mem := x.Args[1] 9608 ptr := x.Args[0] 9609 if !(x.Uses == 1 && clobber(x)) { 9610 break 9611 } 9612 b = x.Block 9613 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) 9614 v.copyOf(v0) 9615 v0.AuxInt = int32ToAuxInt(off) 9616 v0.Aux = symToAux(sym) 9617 v0.AddArg2(ptr, mem) 9618 return true 9619 } 9620 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 9621 // cond: x.Uses == 1 && clobber(x) 9622 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 9623 for { 9624 x := v_0 9625 if x.Op != OpAMD64MOVQload { 9626 break 9627 } 9628 off := auxIntToInt32(x.AuxInt) 9629 sym := auxToSym(x.Aux) 9630 mem := x.Args[1] 9631 ptr := x.Args[0] 9632 if !(x.Uses == 1 && clobber(x)) { 9633 break 9634 } 9635 b = x.Block 9636 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) 9637 v.copyOf(v0) 9638 v0.AuxInt = int32ToAuxInt(off) 9639 v0.Aux = symToAux(sym) 9640 v0.AddArg2(ptr, mem) 9641 return true 9642 } 9643 // match: (MOVBQZX x) 9644 // cond: zeroUpper56Bits(x,3) 9645 // result: x 9646 for { 9647 x := v_0 
9648 if !(zeroUpper56Bits(x, 3)) { 9649 break 9650 } 9651 v.copyOf(x) 9652 return true 9653 } 9654 // match: (MOVBQZX (ANDLconst [c] x)) 9655 // result: (ANDLconst [c & 0xff] x) 9656 for { 9657 if v_0.Op != OpAMD64ANDLconst { 9658 break 9659 } 9660 c := auxIntToInt32(v_0.AuxInt) 9661 x := v_0.Args[0] 9662 v.reset(OpAMD64ANDLconst) 9663 v.AuxInt = int32ToAuxInt(c & 0xff) 9664 v.AddArg(x) 9665 return true 9666 } 9667 // match: (MOVBQZX (MOVBQZX x)) 9668 // result: (MOVBQZX x) 9669 for { 9670 if v_0.Op != OpAMD64MOVBQZX { 9671 break 9672 } 9673 x := v_0.Args[0] 9674 v.reset(OpAMD64MOVBQZX) 9675 v.AddArg(x) 9676 return true 9677 } 9678 return false 9679 } 9680 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool { 9681 v_1 := v.Args[1] 9682 v_0 := v.Args[0] 9683 // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 9684 // cond: is32Bit(int64(off1)+int64(off2)) 9685 // result: (MOVBatomicload [off1+off2] {sym} ptr mem) 9686 for { 9687 off1 := auxIntToInt32(v.AuxInt) 9688 sym := auxToSym(v.Aux) 9689 if v_0.Op != OpAMD64ADDQconst { 9690 break 9691 } 9692 off2 := auxIntToInt32(v_0.AuxInt) 9693 ptr := v_0.Args[0] 9694 mem := v_1 9695 if !(is32Bit(int64(off1) + int64(off2))) { 9696 break 9697 } 9698 v.reset(OpAMD64MOVBatomicload) 9699 v.AuxInt = int32ToAuxInt(off1 + off2) 9700 v.Aux = symToAux(sym) 9701 v.AddArg2(ptr, mem) 9702 return true 9703 } 9704 // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 9705 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 9706 // result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) 9707 for { 9708 off1 := auxIntToInt32(v.AuxInt) 9709 sym1 := auxToSym(v.Aux) 9710 if v_0.Op != OpAMD64LEAQ { 9711 break 9712 } 9713 off2 := auxIntToInt32(v_0.AuxInt) 9714 sym2 := auxToSym(v_0.Aux) 9715 ptr := v_0.Args[0] 9716 mem := v_1 9717 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 9718 break 9719 } 9720 v.reset(OpAMD64MOVBatomicload) 9721 v.AuxInt = int32ToAuxInt(off1 + off2) 9722 v.Aux = symToAux(mergeSym(sym1, sym2)) 9723 v.AddArg2(ptr, mem) 9724 return true 9725 } 9726 return false 9727 } 9728 func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { 9729 v_1 := v.Args[1] 9730 v_0 := v.Args[0] 9731 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 9732 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 9733 // result: (MOVBQZX x) 9734 for { 9735 off := auxIntToInt32(v.AuxInt) 9736 sym := auxToSym(v.Aux) 9737 ptr := v_0 9738 if v_1.Op != OpAMD64MOVBstore { 9739 break 9740 } 9741 off2 := auxIntToInt32(v_1.AuxInt) 9742 sym2 := auxToSym(v_1.Aux) 9743 x := v_1.Args[1] 9744 ptr2 := v_1.Args[0] 9745 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 9746 break 9747 } 9748 v.reset(OpAMD64MOVBQZX) 9749 v.AddArg(x) 9750 return true 9751 } 9752 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 9753 // cond: is32Bit(int64(off1)+int64(off2)) 9754 // result: (MOVBload [off1+off2] {sym} ptr mem) 9755 for { 9756 off1 := auxIntToInt32(v.AuxInt) 9757 sym := auxToSym(v.Aux) 9758 if v_0.Op != OpAMD64ADDQconst { 9759 break 9760 } 9761 off2 := auxIntToInt32(v_0.AuxInt) 9762 ptr := v_0.Args[0] 9763 mem := v_1 9764 if !(is32Bit(int64(off1) + int64(off2))) { 9765 break 9766 } 9767 v.reset(OpAMD64MOVBload) 9768 v.AuxInt = int32ToAuxInt(off1 + off2) 9769 v.Aux = symToAux(sym) 9770 v.AddArg2(ptr, mem) 9771 return true 9772 } 9773 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9774 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 
9775 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9776 for { 9777 off1 := auxIntToInt32(v.AuxInt) 9778 sym1 := auxToSym(v.Aux) 9779 if v_0.Op != OpAMD64LEAQ { 9780 break 9781 } 9782 off2 := auxIntToInt32(v_0.AuxInt) 9783 sym2 := auxToSym(v_0.Aux) 9784 base := v_0.Args[0] 9785 mem := v_1 9786 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 9787 break 9788 } 9789 v.reset(OpAMD64MOVBload) 9790 v.AuxInt = int32ToAuxInt(off1 + off2) 9791 v.Aux = symToAux(mergeSym(sym1, sym2)) 9792 v.AddArg2(base, mem) 9793 return true 9794 } 9795 // match: (MOVBload [off] {sym} (SB) _) 9796 // cond: symIsRO(sym) 9797 // result: (MOVLconst [int32(read8(sym, int64(off)))]) 9798 for { 9799 off := auxIntToInt32(v.AuxInt) 9800 sym := auxToSym(v.Aux) 9801 if v_0.Op != OpSB || !(symIsRO(sym)) { 9802 break 9803 } 9804 v.reset(OpAMD64MOVLconst) 9805 v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off)))) 9806 return true 9807 } 9808 return false 9809 } 9810 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { 9811 v_2 := v.Args[2] 9812 v_1 := v.Args[1] 9813 v_0 := v.Args[0] 9814 // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem) 9815 // cond: y.Uses == 1 9816 // result: (SETLstore [off] {sym} ptr x mem) 9817 for { 9818 off := auxIntToInt32(v.AuxInt) 9819 sym := auxToSym(v.Aux) 9820 ptr := v_0 9821 y := v_1 9822 if y.Op != OpAMD64SETL { 9823 break 9824 } 9825 x := y.Args[0] 9826 mem := v_2 9827 if !(y.Uses == 1) { 9828 break 9829 } 9830 v.reset(OpAMD64SETLstore) 9831 v.AuxInt = int32ToAuxInt(off) 9832 v.Aux = symToAux(sym) 9833 v.AddArg3(ptr, x, mem) 9834 return true 9835 } 9836 // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem) 9837 // cond: y.Uses == 1 9838 // result: (SETLEstore [off] {sym} ptr x mem) 9839 for { 9840 off := auxIntToInt32(v.AuxInt) 9841 sym := auxToSym(v.Aux) 9842 ptr := v_0 9843 y := v_1 9844 if y.Op != OpAMD64SETLE { 9845 break 9846 } 9847 x := y.Args[0] 9848 mem := v_2 9849 if !(y.Uses == 1) { 9850 break 9851 } 9852 v.reset(OpAMD64SETLEstore) 9853 v.AuxInt = int32ToAuxInt(off) 9854 v.Aux = symToAux(sym) 9855 v.AddArg3(ptr, x, mem) 9856 return true 9857 } 9858 // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) 9859 // cond: y.Uses == 1 9860 // result: (SETGstore [off] {sym} ptr x mem) 9861 for { 9862 off := auxIntToInt32(v.AuxInt) 9863 sym := auxToSym(v.Aux) 9864 ptr := v_0 9865 y := v_1 9866 if y.Op != OpAMD64SETG { 9867 break 9868 } 9869 x := y.Args[0] 9870 mem := v_2 9871 if !(y.Uses == 1) { 9872 break 9873 } 9874 v.reset(OpAMD64SETGstore) 9875 v.AuxInt = int32ToAuxInt(off) 9876 v.Aux = symToAux(sym) 9877 v.AddArg3(ptr, x, mem) 9878 return true 9879 } 9880 // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem) 9881 // cond: y.Uses == 1 9882 // result: (SETGEstore [off] {sym} ptr x mem) 9883 for { 9884 off := auxIntToInt32(v.AuxInt) 9885 sym := auxToSym(v.Aux) 9886 ptr := v_0 9887 y := v_1 9888 if y.Op != OpAMD64SETGE { 9889 break 9890 } 9891 x := y.Args[0] 9892 mem := v_2 9893 if !(y.Uses == 1) { 9894 break 9895 } 9896 v.reset(OpAMD64SETGEstore) 9897 v.AuxInt = int32ToAuxInt(off) 9898 v.Aux = symToAux(sym) 9899 v.AddArg3(ptr, x, mem) 9900 return true 9901 } 9902 // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) 9903 // cond: y.Uses == 1 9904 // result: (SETEQstore [off] {sym} ptr x mem) 9905 for { 9906 off := auxIntToInt32(v.AuxInt) 9907 sym := auxToSym(v.Aux) 9908 ptr := v_0 9909 y := v_1 9910 if y.Op != OpAMD64SETEQ { 9911 break 9912 } 9913 x := y.Args[0] 9914 mem := v_2 9915 if !(y.Uses == 1) { 9916 break 9917 } 9918 
v.reset(OpAMD64SETEQstore) 9919 v.AuxInt = int32ToAuxInt(off) 9920 v.Aux = symToAux(sym) 9921 v.AddArg3(ptr, x, mem) 9922 return true 9923 } 9924 // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) 9925 // cond: y.Uses == 1 9926 // result: (SETNEstore [off] {sym} ptr x mem) 9927 for { 9928 off := auxIntToInt32(v.AuxInt) 9929 sym := auxToSym(v.Aux) 9930 ptr := v_0 9931 y := v_1 9932 if y.Op != OpAMD64SETNE { 9933 break 9934 } 9935 x := y.Args[0] 9936 mem := v_2 9937 if !(y.Uses == 1) { 9938 break 9939 } 9940 v.reset(OpAMD64SETNEstore) 9941 v.AuxInt = int32ToAuxInt(off) 9942 v.Aux = symToAux(sym) 9943 v.AddArg3(ptr, x, mem) 9944 return true 9945 } 9946 // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) 9947 // cond: y.Uses == 1 9948 // result: (SETBstore [off] {sym} ptr x mem) 9949 for { 9950 off := auxIntToInt32(v.AuxInt) 9951 sym := auxToSym(v.Aux) 9952 ptr := v_0 9953 y := v_1 9954 if y.Op != OpAMD64SETB { 9955 break 9956 } 9957 x := y.Args[0] 9958 mem := v_2 9959 if !(y.Uses == 1) { 9960 break 9961 } 9962 v.reset(OpAMD64SETBstore) 9963 v.AuxInt = int32ToAuxInt(off) 9964 v.Aux = symToAux(sym) 9965 v.AddArg3(ptr, x, mem) 9966 return true 9967 } 9968 // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) 9969 // cond: y.Uses == 1 9970 // result: (SETBEstore [off] {sym} ptr x mem) 9971 for { 9972 off := auxIntToInt32(v.AuxInt) 9973 sym := auxToSym(v.Aux) 9974 ptr := v_0 9975 y := v_1 9976 if y.Op != OpAMD64SETBE { 9977 break 9978 } 9979 x := y.Args[0] 9980 mem := v_2 9981 if !(y.Uses == 1) { 9982 break 9983 } 9984 v.reset(OpAMD64SETBEstore) 9985 v.AuxInt = int32ToAuxInt(off) 9986 v.Aux = symToAux(sym) 9987 v.AddArg3(ptr, x, mem) 9988 return true 9989 } 9990 // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) 9991 // cond: y.Uses == 1 9992 // result: (SETAstore [off] {sym} ptr x mem) 9993 for { 9994 off := auxIntToInt32(v.AuxInt) 9995 sym := auxToSym(v.Aux) 9996 ptr := v_0 9997 y := v_1 9998 if y.Op != OpAMD64SETA { 9999 break 10000 } 10001 x := y.Args[0] 10002 mem := v_2 10003 if !(y.Uses == 1) { 10004 break 10005 } 10006 v.reset(OpAMD64SETAstore) 10007 v.AuxInt = int32ToAuxInt(off) 10008 v.Aux = symToAux(sym) 10009 v.AddArg3(ptr, x, mem) 10010 return true 10011 } 10012 // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) 10013 // cond: y.Uses == 1 10014 // result: (SETAEstore [off] {sym} ptr x mem) 10015 for { 10016 off := auxIntToInt32(v.AuxInt) 10017 sym := auxToSym(v.Aux) 10018 ptr := v_0 10019 y := v_1 10020 if y.Op != OpAMD64SETAE { 10021 break 10022 } 10023 x := y.Args[0] 10024 mem := v_2 10025 if !(y.Uses == 1) { 10026 break 10027 } 10028 v.reset(OpAMD64SETAEstore) 10029 v.AuxInt = int32ToAuxInt(off) 10030 v.Aux = symToAux(sym) 10031 v.AddArg3(ptr, x, mem) 10032 return true 10033 } 10034 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 10035 // result: (MOVBstore [off] {sym} ptr x mem) 10036 for { 10037 off := auxIntToInt32(v.AuxInt) 10038 sym := auxToSym(v.Aux) 10039 ptr := v_0 10040 if v_1.Op != OpAMD64MOVBQSX { 10041 break 10042 } 10043 x := v_1.Args[0] 10044 mem := v_2 10045 v.reset(OpAMD64MOVBstore) 10046 v.AuxInt = int32ToAuxInt(off) 10047 v.Aux = symToAux(sym) 10048 v.AddArg3(ptr, x, mem) 10049 return true 10050 } 10051 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 10052 // result: (MOVBstore [off] {sym} ptr x mem) 10053 for { 10054 off := auxIntToInt32(v.AuxInt) 10055 sym := auxToSym(v.Aux) 10056 ptr := v_0 10057 if v_1.Op != OpAMD64MOVBQZX { 10058 break 10059 } 10060 x := v_1.Args[0] 10061 mem := v_2 10062 v.reset(OpAMD64MOVBstore) 10063 v.AuxInt = 
int32ToAuxInt(off) 10064 v.Aux = symToAux(sym) 10065 v.AddArg3(ptr, x, mem) 10066 return true 10067 } 10068 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 10069 // cond: is32Bit(int64(off1)+int64(off2)) 10070 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 10071 for { 10072 off1 := auxIntToInt32(v.AuxInt) 10073 sym := auxToSym(v.Aux) 10074 if v_0.Op != OpAMD64ADDQconst { 10075 break 10076 } 10077 off2 := auxIntToInt32(v_0.AuxInt) 10078 ptr := v_0.Args[0] 10079 val := v_1 10080 mem := v_2 10081 if !(is32Bit(int64(off1) + int64(off2))) { 10082 break 10083 } 10084 v.reset(OpAMD64MOVBstore) 10085 v.AuxInt = int32ToAuxInt(off1 + off2) 10086 v.Aux = symToAux(sym) 10087 v.AddArg3(ptr, val, mem) 10088 return true 10089 } 10090 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 10091 // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) 10092 for { 10093 off := auxIntToInt32(v.AuxInt) 10094 sym := auxToSym(v.Aux) 10095 ptr := v_0 10096 if v_1.Op != OpAMD64MOVLconst { 10097 break 10098 } 10099 c := auxIntToInt32(v_1.AuxInt) 10100 mem := v_2 10101 v.reset(OpAMD64MOVBstoreconst) 10102 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) 10103 v.Aux = symToAux(sym) 10104 v.AddArg2(ptr, mem) 10105 return true 10106 } 10107 // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) 10108 // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) 10109 for { 10110 off := auxIntToInt32(v.AuxInt) 10111 sym := auxToSym(v.Aux) 10112 ptr := v_0 10113 if v_1.Op != OpAMD64MOVQconst { 10114 break 10115 } 10116 c := auxIntToInt64(v_1.AuxInt) 10117 mem := v_2 10118 v.reset(OpAMD64MOVBstoreconst) 10119 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) 10120 v.Aux = symToAux(sym) 10121 v.AddArg2(ptr, mem) 10122 return true 10123 } 10124 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 10125 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 10126 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10127 for { 10128 off1 := auxIntToInt32(v.AuxInt) 10129 sym1 := auxToSym(v.Aux) 10130 if v_0.Op != OpAMD64LEAQ { 10131 break 10132 } 10133 off2 := auxIntToInt32(v_0.AuxInt) 10134 sym2 := auxToSym(v_0.Aux) 10135 base := v_0.Args[0] 10136 val := v_1 10137 mem := v_2 10138 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 10139 break 10140 } 10141 v.reset(OpAMD64MOVBstore) 10142 v.AuxInt = int32ToAuxInt(off1 + off2) 10143 v.Aux = symToAux(mergeSym(sym1, sym2)) 10144 v.AddArg3(base, val, mem) 10145 return true 10146 } 10147 return false 10148 } 10149 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { 10150 v_1 := v.Args[1] 10151 v_0 := v.Args[0] 10152 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 10153 // cond: ValAndOff(sc).canAdd32(off) 10154 // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) 10155 for { 10156 sc := auxIntToValAndOff(v.AuxInt) 10157 s := auxToSym(v.Aux) 10158 if v_0.Op != OpAMD64ADDQconst { 10159 break 10160 } 10161 off := auxIntToInt32(v_0.AuxInt) 10162 ptr := v_0.Args[0] 10163 mem := v_1 10164 if !(ValAndOff(sc).canAdd32(off)) { 10165 break 10166 } 10167 v.reset(OpAMD64MOVBstoreconst) 10168 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 10169 v.Aux = symToAux(s) 10170 v.AddArg2(ptr, mem) 10171 return true 10172 } 10173 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 10174 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) 10175 // result: 
(MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) 10176 for { 10177 sc := auxIntToValAndOff(v.AuxInt) 10178 sym1 := auxToSym(v.Aux) 10179 if v_0.Op != OpAMD64LEAQ { 10180 break 10181 } 10182 off := auxIntToInt32(v_0.AuxInt) 10183 sym2 := auxToSym(v_0.Aux) 10184 ptr := v_0.Args[0] 10185 mem := v_1 10186 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { 10187 break 10188 } 10189 v.reset(OpAMD64MOVBstoreconst) 10190 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 10191 v.Aux = symToAux(mergeSym(sym1, sym2)) 10192 v.AddArg2(ptr, mem) 10193 return true 10194 } 10195 return false 10196 } 10197 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { 10198 v_0 := v.Args[0] 10199 b := v.Block 10200 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 10201 // cond: x.Uses == 1 && clobber(x) 10202 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 10203 for { 10204 x := v_0 10205 if x.Op != OpAMD64MOVLload { 10206 break 10207 } 10208 off := auxIntToInt32(x.AuxInt) 10209 sym := auxToSym(x.Aux) 10210 mem := x.Args[1] 10211 ptr := x.Args[0] 10212 if !(x.Uses == 1 && clobber(x)) { 10213 break 10214 } 10215 b = x.Block 10216 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) 10217 v.copyOf(v0) 10218 v0.AuxInt = int32ToAuxInt(off) 10219 v0.Aux = symToAux(sym) 10220 v0.AddArg2(ptr, mem) 10221 return true 10222 } 10223 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 10224 // cond: x.Uses == 1 && clobber(x) 10225 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 10226 for { 10227 x := v_0 10228 if x.Op != OpAMD64MOVQload { 10229 break 10230 } 10231 off := auxIntToInt32(x.AuxInt) 10232 sym := auxToSym(x.Aux) 10233 mem := x.Args[1] 10234 ptr := x.Args[0] 10235 if !(x.Uses == 1 && clobber(x)) { 10236 break 10237 } 10238 b = x.Block 10239 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) 10240 v.copyOf(v0) 10241 v0.AuxInt = int32ToAuxInt(off) 10242 v0.Aux = symToAux(sym) 10243 v0.AddArg2(ptr, mem) 10244 return true 10245 } 10246 // match: (MOVLQSX (ANDLconst [c] x)) 10247 // cond: uint32(c) & 0x80000000 == 0 10248 // result: (ANDLconst [c & 0x7fffffff] x) 10249 for { 10250 if v_0.Op != OpAMD64ANDLconst { 10251 break 10252 } 10253 c := auxIntToInt32(v_0.AuxInt) 10254 x := v_0.Args[0] 10255 if !(uint32(c)&0x80000000 == 0) { 10256 break 10257 } 10258 v.reset(OpAMD64ANDLconst) 10259 v.AuxInt = int32ToAuxInt(c & 0x7fffffff) 10260 v.AddArg(x) 10261 return true 10262 } 10263 // match: (MOVLQSX (MOVLQSX x)) 10264 // result: (MOVLQSX x) 10265 for { 10266 if v_0.Op != OpAMD64MOVLQSX { 10267 break 10268 } 10269 x := v_0.Args[0] 10270 v.reset(OpAMD64MOVLQSX) 10271 v.AddArg(x) 10272 return true 10273 } 10274 // match: (MOVLQSX (MOVWQSX x)) 10275 // result: (MOVWQSX x) 10276 for { 10277 if v_0.Op != OpAMD64MOVWQSX { 10278 break 10279 } 10280 x := v_0.Args[0] 10281 v.reset(OpAMD64MOVWQSX) 10282 v.AddArg(x) 10283 return true 10284 } 10285 // match: (MOVLQSX (MOVBQSX x)) 10286 // result: (MOVBQSX x) 10287 for { 10288 if v_0.Op != OpAMD64MOVBQSX { 10289 break 10290 } 10291 x := v_0.Args[0] 10292 v.reset(OpAMD64MOVBQSX) 10293 v.AddArg(x) 10294 return true 10295 } 10296 return false 10297 } 10298 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { 10299 v_1 := v.Args[1] 10300 v_0 := v.Args[0] 10301 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 10302 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 10303 // result: (MOVLQSX x) 10304 for { 10305 off := auxIntToInt32(v.AuxInt) 10306 sym := 
auxToSym(v.Aux) 10307 ptr := v_0 10308 if v_1.Op != OpAMD64MOVLstore { 10309 break 10310 } 10311 off2 := auxIntToInt32(v_1.AuxInt) 10312 sym2 := auxToSym(v_1.Aux) 10313 x := v_1.Args[1] 10314 ptr2 := v_1.Args[0] 10315 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 10316 break 10317 } 10318 v.reset(OpAMD64MOVLQSX) 10319 v.AddArg(x) 10320 return true 10321 } 10322 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10323 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 10324 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10325 for { 10326 off1 := auxIntToInt32(v.AuxInt) 10327 sym1 := auxToSym(v.Aux) 10328 if v_0.Op != OpAMD64LEAQ { 10329 break 10330 } 10331 off2 := auxIntToInt32(v_0.AuxInt) 10332 sym2 := auxToSym(v_0.Aux) 10333 base := v_0.Args[0] 10334 mem := v_1 10335 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 10336 break 10337 } 10338 v.reset(OpAMD64MOVLQSXload) 10339 v.AuxInt = int32ToAuxInt(off1 + off2) 10340 v.Aux = symToAux(mergeSym(sym1, sym2)) 10341 v.AddArg2(base, mem) 10342 return true 10343 } 10344 return false 10345 } 10346 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { 10347 v_0 := v.Args[0] 10348 b := v.Block 10349 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 10350 // cond: x.Uses == 1 && clobber(x) 10351 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 10352 for { 10353 x := v_0 10354 if x.Op != OpAMD64MOVLload { 10355 break 10356 } 10357 off := auxIntToInt32(x.AuxInt) 10358 sym := auxToSym(x.Aux) 10359 mem := x.Args[1] 10360 ptr := x.Args[0] 10361 if !(x.Uses == 1 && clobber(x)) { 10362 break 10363 } 10364 b = x.Block 10365 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) 10366 v.copyOf(v0) 10367 v0.AuxInt = int32ToAuxInt(off) 10368 v0.Aux = symToAux(sym) 10369 v0.AddArg2(ptr, mem) 10370 return true 10371 } 10372 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 10373 // cond: x.Uses == 1 && clobber(x) 10374 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 10375 for { 10376 x := v_0 10377 if x.Op != OpAMD64MOVQload { 10378 break 10379 } 10380 off := auxIntToInt32(x.AuxInt) 10381 sym := auxToSym(x.Aux) 10382 mem := x.Args[1] 10383 ptr := x.Args[0] 10384 if !(x.Uses == 1 && clobber(x)) { 10385 break 10386 } 10387 b = x.Block 10388 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) 10389 v.copyOf(v0) 10390 v0.AuxInt = int32ToAuxInt(off) 10391 v0.Aux = symToAux(sym) 10392 v0.AddArg2(ptr, mem) 10393 return true 10394 } 10395 // match: (MOVLQZX x) 10396 // cond: zeroUpper32Bits(x,3) 10397 // result: x 10398 for { 10399 x := v_0 10400 if !(zeroUpper32Bits(x, 3)) { 10401 break 10402 } 10403 v.copyOf(x) 10404 return true 10405 } 10406 // match: (MOVLQZX (ANDLconst [c] x)) 10407 // result: (ANDLconst [c] x) 10408 for { 10409 if v_0.Op != OpAMD64ANDLconst { 10410 break 10411 } 10412 c := auxIntToInt32(v_0.AuxInt) 10413 x := v_0.Args[0] 10414 v.reset(OpAMD64ANDLconst) 10415 v.AuxInt = int32ToAuxInt(c) 10416 v.AddArg(x) 10417 return true 10418 } 10419 // match: (MOVLQZX (MOVLQZX x)) 10420 // result: (MOVLQZX x) 10421 for { 10422 if v_0.Op != OpAMD64MOVLQZX { 10423 break 10424 } 10425 x := v_0.Args[0] 10426 v.reset(OpAMD64MOVLQZX) 10427 v.AddArg(x) 10428 return true 10429 } 10430 // match: (MOVLQZX (MOVWQZX x)) 10431 // result: (MOVWQZX x) 10432 for { 10433 if v_0.Op != OpAMD64MOVWQZX { 10434 break 10435 } 10436 x := v_0.Args[0] 10437 v.reset(OpAMD64MOVWQZX) 10438 v.AddArg(x) 10439 return true 10440 } 10441 // match: (MOVLQZX (MOVBQZX x)) 10442 // result: 
(MOVBQZX x) 10443 for { 10444 if v_0.Op != OpAMD64MOVBQZX { 10445 break 10446 } 10447 x := v_0.Args[0] 10448 v.reset(OpAMD64MOVBQZX) 10449 v.AddArg(x) 10450 return true 10451 } 10452 return false 10453 } 10454 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { 10455 v_1 := v.Args[1] 10456 v_0 := v.Args[0] 10457 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 10458 // cond: is32Bit(int64(off1)+int64(off2)) 10459 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 10460 for { 10461 off1 := auxIntToInt32(v.AuxInt) 10462 sym := auxToSym(v.Aux) 10463 if v_0.Op != OpAMD64ADDQconst { 10464 break 10465 } 10466 off2 := auxIntToInt32(v_0.AuxInt) 10467 ptr := v_0.Args[0] 10468 mem := v_1 10469 if !(is32Bit(int64(off1) + int64(off2))) { 10470 break 10471 } 10472 v.reset(OpAMD64MOVLatomicload) 10473 v.AuxInt = int32ToAuxInt(off1 + off2) 10474 v.Aux = symToAux(sym) 10475 v.AddArg2(ptr, mem) 10476 return true 10477 } 10478 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 10479 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 10480 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) 10481 for { 10482 off1 := auxIntToInt32(v.AuxInt) 10483 sym1 := auxToSym(v.Aux) 10484 if v_0.Op != OpAMD64LEAQ { 10485 break 10486 } 10487 off2 := auxIntToInt32(v_0.AuxInt) 10488 sym2 := auxToSym(v_0.Aux) 10489 ptr := v_0.Args[0] 10490 mem := v_1 10491 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 10492 break 10493 } 10494 v.reset(OpAMD64MOVLatomicload) 10495 v.AuxInt = int32ToAuxInt(off1 + off2) 10496 v.Aux = symToAux(mergeSym(sym1, sym2)) 10497 v.AddArg2(ptr, mem) 10498 return true 10499 } 10500 return false 10501 } 10502 func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool { 10503 v_0 := v.Args[0] 10504 b := v.Block 10505 // match: (MOVLf2i <t> (Arg <u> [off] {sym})) 10506 // cond: t.Size() == u.Size() 10507 // result: @b.Func.Entry (Arg <t> [off] {sym}) 10508 for { 10509 t := v.Type 10510 if v_0.Op != OpArg { 10511 break 10512 } 10513 u := v_0.Type 10514 off := auxIntToInt32(v_0.AuxInt) 10515 sym := auxToSym(v_0.Aux) 10516 if !(t.Size() == u.Size()) { 10517 break 10518 } 10519 b = b.Func.Entry 10520 v0 := b.NewValue0(v.Pos, OpArg, t) 10521 v.copyOf(v0) 10522 v0.AuxInt = int32ToAuxInt(off) 10523 v0.Aux = symToAux(sym) 10524 return true 10525 } 10526 return false 10527 } 10528 func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool { 10529 v_0 := v.Args[0] 10530 b := v.Block 10531 // match: (MOVLi2f <t> (Arg <u> [off] {sym})) 10532 // cond: t.Size() == u.Size() 10533 // result: @b.Func.Entry (Arg <t> [off] {sym}) 10534 for { 10535 t := v.Type 10536 if v_0.Op != OpArg { 10537 break 10538 } 10539 u := v_0.Type 10540 off := auxIntToInt32(v_0.AuxInt) 10541 sym := auxToSym(v_0.Aux) 10542 if !(t.Size() == u.Size()) { 10543 break 10544 } 10545 b = b.Func.Entry 10546 v0 := b.NewValue0(v.Pos, OpArg, t) 10547 v.copyOf(v0) 10548 v0.AuxInt = int32ToAuxInt(off) 10549 v0.Aux = symToAux(sym) 10550 return true 10551 } 10552 return false 10553 } 10554 func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { 10555 v_1 := v.Args[1] 10556 v_0 := v.Args[0] 10557 b := v.Block 10558 config := b.Func.Config 10559 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 10560 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 10561 // result: (MOVLQZX x) 10562 for { 10563 off := auxIntToInt32(v.AuxInt) 10564 sym := auxToSym(v.Aux) 10565 ptr := v_0 10566 if v_1.Op != OpAMD64MOVLstore { 10567 break 10568 } 10569 off2 := 
auxIntToInt32(v_1.AuxInt) 10570 sym2 := auxToSym(v_1.Aux) 10571 x := v_1.Args[1] 10572 ptr2 := v_1.Args[0] 10573 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 10574 break 10575 } 10576 v.reset(OpAMD64MOVLQZX) 10577 v.AddArg(x) 10578 return true 10579 } 10580 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 10581 // cond: is32Bit(int64(off1)+int64(off2)) 10582 // result: (MOVLload [off1+off2] {sym} ptr mem) 10583 for { 10584 off1 := auxIntToInt32(v.AuxInt) 10585 sym := auxToSym(v.Aux) 10586 if v_0.Op != OpAMD64ADDQconst { 10587 break 10588 } 10589 off2 := auxIntToInt32(v_0.AuxInt) 10590 ptr := v_0.Args[0] 10591 mem := v_1 10592 if !(is32Bit(int64(off1) + int64(off2))) { 10593 break 10594 } 10595 v.reset(OpAMD64MOVLload) 10596 v.AuxInt = int32ToAuxInt(off1 + off2) 10597 v.Aux = symToAux(sym) 10598 v.AddArg2(ptr, mem) 10599 return true 10600 } 10601 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10602 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 10603 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10604 for { 10605 off1 := auxIntToInt32(v.AuxInt) 10606 sym1 := auxToSym(v.Aux) 10607 if v_0.Op != OpAMD64LEAQ { 10608 break 10609 } 10610 off2 := auxIntToInt32(v_0.AuxInt) 10611 sym2 := auxToSym(v_0.Aux) 10612 base := v_0.Args[0] 10613 mem := v_1 10614 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 10615 break 10616 } 10617 v.reset(OpAMD64MOVLload) 10618 v.AuxInt = int32ToAuxInt(off1 + off2) 10619 v.Aux = symToAux(mergeSym(sym1, sym2)) 10620 v.AddArg2(base, mem) 10621 return true 10622 } 10623 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) 10624 // result: (MOVLf2i val) 10625 for { 10626 off := auxIntToInt32(v.AuxInt) 10627 sym := auxToSym(v.Aux) 10628 ptr := v_0 10629 if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { 10630 break 10631 } 10632 val := v_1.Args[1] 10633 if ptr != v_1.Args[0] { 10634 break 10635 } 10636 v.reset(OpAMD64MOVLf2i) 10637 v.AddArg(val) 10638 return true 10639 } 10640 // match: (MOVLload [off] {sym} (SB) _) 10641 // cond: symIsRO(sym) 10642 // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) 10643 for { 10644 off := auxIntToInt32(v.AuxInt) 10645 sym := auxToSym(v.Aux) 10646 if v_0.Op != OpSB || !(symIsRO(sym)) { 10647 break 10648 } 10649 v.reset(OpAMD64MOVQconst) 10650 v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) 10651 return true 10652 } 10653 return false 10654 } 10655 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { 10656 v_2 := v.Args[2] 10657 v_1 := v.Args[1] 10658 v_0 := v.Args[0] 10659 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 10660 // result: (MOVLstore [off] {sym} ptr x mem) 10661 for { 10662 off := auxIntToInt32(v.AuxInt) 10663 sym := auxToSym(v.Aux) 10664 ptr := v_0 10665 if v_1.Op != OpAMD64MOVLQSX { 10666 break 10667 } 10668 x := v_1.Args[0] 10669 mem := v_2 10670 v.reset(OpAMD64MOVLstore) 10671 v.AuxInt = int32ToAuxInt(off) 10672 v.Aux = symToAux(sym) 10673 v.AddArg3(ptr, x, mem) 10674 return true 10675 } 10676 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 10677 // result: (MOVLstore [off] {sym} ptr x mem) 10678 for { 10679 off := auxIntToInt32(v.AuxInt) 10680 sym := auxToSym(v.Aux) 10681 ptr := v_0 10682 if v_1.Op != OpAMD64MOVLQZX { 10683 break 10684 } 10685 x := v_1.Args[0] 10686 mem := v_2 10687 v.reset(OpAMD64MOVLstore) 10688 v.AuxInt = int32ToAuxInt(off) 10689 v.Aux = symToAux(sym) 
10690 v.AddArg3(ptr, x, mem) 10691 return true 10692 } 10693 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 10694 // cond: is32Bit(int64(off1)+int64(off2)) 10695 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 10696 for { 10697 off1 := auxIntToInt32(v.AuxInt) 10698 sym := auxToSym(v.Aux) 10699 if v_0.Op != OpAMD64ADDQconst { 10700 break 10701 } 10702 off2 := auxIntToInt32(v_0.AuxInt) 10703 ptr := v_0.Args[0] 10704 val := v_1 10705 mem := v_2 10706 if !(is32Bit(int64(off1) + int64(off2))) { 10707 break 10708 } 10709 v.reset(OpAMD64MOVLstore) 10710 v.AuxInt = int32ToAuxInt(off1 + off2) 10711 v.Aux = symToAux(sym) 10712 v.AddArg3(ptr, val, mem) 10713 return true 10714 } 10715 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 10716 // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) 10717 for { 10718 off := auxIntToInt32(v.AuxInt) 10719 sym := auxToSym(v.Aux) 10720 ptr := v_0 10721 if v_1.Op != OpAMD64MOVLconst { 10722 break 10723 } 10724 c := auxIntToInt32(v_1.AuxInt) 10725 mem := v_2 10726 v.reset(OpAMD64MOVLstoreconst) 10727 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 10728 v.Aux = symToAux(sym) 10729 v.AddArg2(ptr, mem) 10730 return true 10731 } 10732 // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) 10733 // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) 10734 for { 10735 off := auxIntToInt32(v.AuxInt) 10736 sym := auxToSym(v.Aux) 10737 ptr := v_0 10738 if v_1.Op != OpAMD64MOVQconst { 10739 break 10740 } 10741 c := auxIntToInt64(v_1.AuxInt) 10742 mem := v_2 10743 v.reset(OpAMD64MOVLstoreconst) 10744 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 10745 v.Aux = symToAux(sym) 10746 v.AddArg2(ptr, mem) 10747 return true 10748 } 10749 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 10750 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 10751 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 10752 for { 10753 off1 := auxIntToInt32(v.AuxInt) 10754 sym1 := auxToSym(v.Aux) 10755 if v_0.Op != OpAMD64LEAQ { 10756 break 10757 } 10758 off2 := auxIntToInt32(v_0.AuxInt) 10759 sym2 := auxToSym(v_0.Aux) 10760 base := v_0.Args[0] 10761 val := v_1 10762 mem := v_2 10763 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 10764 break 10765 } 10766 v.reset(OpAMD64MOVLstore) 10767 v.AuxInt = int32ToAuxInt(off1 + off2) 10768 v.Aux = symToAux(mergeSym(sym1, sym2)) 10769 v.AddArg3(base, val, mem) 10770 return true 10771 } 10772 // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) 10773 // cond: y.Uses==1 && clobber(y) 10774 // result: (ADDLmodify [off] {sym} ptr x mem) 10775 for { 10776 off := auxIntToInt32(v.AuxInt) 10777 sym := auxToSym(v.Aux) 10778 ptr := v_0 10779 y := v_1 10780 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 10781 break 10782 } 10783 mem := y.Args[2] 10784 x := y.Args[0] 10785 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 10786 break 10787 } 10788 v.reset(OpAMD64ADDLmodify) 10789 v.AuxInt = int32ToAuxInt(off) 10790 v.Aux = symToAux(sym) 10791 v.AddArg3(ptr, x, mem) 10792 return true 10793 } 10794 // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) 10795 // cond: y.Uses==1 && clobber(y) 10796 // result: (ANDLmodify [off] {sym} ptr x mem) 10797 for { 10798 off := auxIntToInt32(v.AuxInt) 10799 sym := auxToSym(v.Aux) 10800 ptr := v_0 10801 y := v_1 10802 if y.Op != OpAMD64ANDLload || 
auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 10803 break 10804 } 10805 mem := y.Args[2] 10806 x := y.Args[0] 10807 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 10808 break 10809 } 10810 v.reset(OpAMD64ANDLmodify) 10811 v.AuxInt = int32ToAuxInt(off) 10812 v.Aux = symToAux(sym) 10813 v.AddArg3(ptr, x, mem) 10814 return true 10815 } 10816 // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) 10817 // cond: y.Uses==1 && clobber(y) 10818 // result: (ORLmodify [off] {sym} ptr x mem) 10819 for { 10820 off := auxIntToInt32(v.AuxInt) 10821 sym := auxToSym(v.Aux) 10822 ptr := v_0 10823 y := v_1 10824 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 10825 break 10826 } 10827 mem := y.Args[2] 10828 x := y.Args[0] 10829 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 10830 break 10831 } 10832 v.reset(OpAMD64ORLmodify) 10833 v.AuxInt = int32ToAuxInt(off) 10834 v.Aux = symToAux(sym) 10835 v.AddArg3(ptr, x, mem) 10836 return true 10837 } 10838 // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) 10839 // cond: y.Uses==1 && clobber(y) 10840 // result: (XORLmodify [off] {sym} ptr x mem) 10841 for { 10842 off := auxIntToInt32(v.AuxInt) 10843 sym := auxToSym(v.Aux) 10844 ptr := v_0 10845 y := v_1 10846 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 10847 break 10848 } 10849 mem := y.Args[2] 10850 x := y.Args[0] 10851 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 10852 break 10853 } 10854 v.reset(OpAMD64XORLmodify) 10855 v.AuxInt = int32ToAuxInt(off) 10856 v.Aux = symToAux(sym) 10857 v.AddArg3(ptr, x, mem) 10858 return true 10859 } 10860 // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) 10861 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 10862 // result: (ADDLmodify [off] {sym} ptr x mem) 10863 for { 10864 off := auxIntToInt32(v.AuxInt) 10865 sym := auxToSym(v.Aux) 10866 ptr := v_0 10867 y := v_1 10868 if y.Op != OpAMD64ADDL { 10869 break 10870 } 10871 _ = y.Args[1] 10872 y_0 := y.Args[0] 10873 y_1 := y.Args[1] 10874 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 10875 l := y_0 10876 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 10877 continue 10878 } 10879 mem := l.Args[1] 10880 if ptr != l.Args[0] { 10881 continue 10882 } 10883 x := y_1 10884 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 10885 continue 10886 } 10887 v.reset(OpAMD64ADDLmodify) 10888 v.AuxInt = int32ToAuxInt(off) 10889 v.Aux = symToAux(sym) 10890 v.AddArg3(ptr, x, mem) 10891 return true 10892 } 10893 break 10894 } 10895 // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) 10896 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 10897 // result: (SUBLmodify [off] {sym} ptr x mem) 10898 for { 10899 off := auxIntToInt32(v.AuxInt) 10900 sym := auxToSym(v.Aux) 10901 ptr := v_0 10902 y := v_1 10903 if y.Op != OpAMD64SUBL { 10904 break 10905 } 10906 x := y.Args[1] 10907 l := y.Args[0] 10908 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 10909 break 10910 } 10911 mem := l.Args[1] 10912 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 10913 break 10914 } 10915 v.reset(OpAMD64SUBLmodify) 10916 v.AuxInt = int32ToAuxInt(off) 10917 v.Aux = symToAux(sym) 10918 v.AddArg3(ptr, x, mem) 10919 return true 10920 } 10921 // match: (MOVLstore 
{sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) 10922 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 10923 // result: (ANDLmodify [off] {sym} ptr x mem) 10924 for { 10925 off := auxIntToInt32(v.AuxInt) 10926 sym := auxToSym(v.Aux) 10927 ptr := v_0 10928 y := v_1 10929 if y.Op != OpAMD64ANDL { 10930 break 10931 } 10932 _ = y.Args[1] 10933 y_0 := y.Args[0] 10934 y_1 := y.Args[1] 10935 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 10936 l := y_0 10937 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 10938 continue 10939 } 10940 mem := l.Args[1] 10941 if ptr != l.Args[0] { 10942 continue 10943 } 10944 x := y_1 10945 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 10946 continue 10947 } 10948 v.reset(OpAMD64ANDLmodify) 10949 v.AuxInt = int32ToAuxInt(off) 10950 v.Aux = symToAux(sym) 10951 v.AddArg3(ptr, x, mem) 10952 return true 10953 } 10954 break 10955 } 10956 // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) 10957 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 10958 // result: (ORLmodify [off] {sym} ptr x mem) 10959 for { 10960 off := auxIntToInt32(v.AuxInt) 10961 sym := auxToSym(v.Aux) 10962 ptr := v_0 10963 y := v_1 10964 if y.Op != OpAMD64ORL { 10965 break 10966 } 10967 _ = y.Args[1] 10968 y_0 := y.Args[0] 10969 y_1 := y.Args[1] 10970 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 10971 l := y_0 10972 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 10973 continue 10974 } 10975 mem := l.Args[1] 10976 if ptr != l.Args[0] { 10977 continue 10978 } 10979 x := y_1 10980 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 10981 continue 10982 } 10983 v.reset(OpAMD64ORLmodify) 10984 v.AuxInt = int32ToAuxInt(off) 10985 v.Aux = symToAux(sym) 10986 v.AddArg3(ptr, x, mem) 10987 return true 10988 } 10989 break 10990 } 10991 // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) 10992 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 10993 // result: (XORLmodify [off] {sym} ptr x mem) 10994 for { 10995 off := auxIntToInt32(v.AuxInt) 10996 sym := auxToSym(v.Aux) 10997 ptr := v_0 10998 y := v_1 10999 if y.Op != OpAMD64XORL { 11000 break 11001 } 11002 _ = y.Args[1] 11003 y_0 := y.Args[0] 11004 y_1 := y.Args[1] 11005 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 11006 l := y_0 11007 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11008 continue 11009 } 11010 mem := l.Args[1] 11011 if ptr != l.Args[0] { 11012 continue 11013 } 11014 x := y_1 11015 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 11016 continue 11017 } 11018 v.reset(OpAMD64XORLmodify) 11019 v.AuxInt = int32ToAuxInt(off) 11020 v.Aux = symToAux(sym) 11021 v.AddArg3(ptr, x, mem) 11022 return true 11023 } 11024 break 11025 } 11026 // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 11027 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 11028 // result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 11029 for { 11030 off := auxIntToInt32(v.AuxInt) 11031 sym := auxToSym(v.Aux) 11032 ptr := v_0 11033 a := v_1 11034 if a.Op != OpAMD64ADDLconst { 11035 break 11036 } 11037 c := auxIntToInt32(a.AuxInt) 11038 l := a.Args[0] 11039 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11040 break 11041 } 11042 mem := l.Args[1] 11043 ptr2 := l.Args[0] 
11044 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 11045 break 11046 } 11047 v.reset(OpAMD64ADDLconstmodify) 11048 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11049 v.Aux = symToAux(sym) 11050 v.AddArg2(ptr, mem) 11051 return true 11052 } 11053 // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 11054 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 11055 // result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 11056 for { 11057 off := auxIntToInt32(v.AuxInt) 11058 sym := auxToSym(v.Aux) 11059 ptr := v_0 11060 a := v_1 11061 if a.Op != OpAMD64ANDLconst { 11062 break 11063 } 11064 c := auxIntToInt32(a.AuxInt) 11065 l := a.Args[0] 11066 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11067 break 11068 } 11069 mem := l.Args[1] 11070 ptr2 := l.Args[0] 11071 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 11072 break 11073 } 11074 v.reset(OpAMD64ANDLconstmodify) 11075 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11076 v.Aux = symToAux(sym) 11077 v.AddArg2(ptr, mem) 11078 return true 11079 } 11080 // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 11081 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 11082 // result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 11083 for { 11084 off := auxIntToInt32(v.AuxInt) 11085 sym := auxToSym(v.Aux) 11086 ptr := v_0 11087 a := v_1 11088 if a.Op != OpAMD64ORLconst { 11089 break 11090 } 11091 c := auxIntToInt32(a.AuxInt) 11092 l := a.Args[0] 11093 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11094 break 11095 } 11096 mem := l.Args[1] 11097 ptr2 := l.Args[0] 11098 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 11099 break 11100 } 11101 v.reset(OpAMD64ORLconstmodify) 11102 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11103 v.Aux = symToAux(sym) 11104 v.AddArg2(ptr, mem) 11105 return true 11106 } 11107 // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 11108 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 11109 // result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 11110 for { 11111 off := auxIntToInt32(v.AuxInt) 11112 sym := auxToSym(v.Aux) 11113 ptr := v_0 11114 a := v_1 11115 if a.Op != OpAMD64XORLconst { 11116 break 11117 } 11118 c := auxIntToInt32(a.AuxInt) 11119 l := a.Args[0] 11120 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11121 break 11122 } 11123 mem := l.Args[1] 11124 ptr2 := l.Args[0] 11125 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 11126 break 11127 } 11128 v.reset(OpAMD64XORLconstmodify) 11129 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11130 v.Aux = symToAux(sym) 11131 v.AddArg2(ptr, mem) 11132 return true 11133 } 11134 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) 11135 // result: (MOVSSstore [off] {sym} ptr val mem) 11136 for { 11137 off := auxIntToInt32(v.AuxInt) 11138 sym := auxToSym(v.Aux) 11139 ptr := v_0 11140 if v_1.Op != OpAMD64MOVLf2i { 11141 break 11142 } 11143 val := v_1.Args[0] 11144 mem := v_2 11145 v.reset(OpAMD64MOVSSstore) 11146 v.AuxInt = int32ToAuxInt(off) 11147 v.Aux = symToAux(sym) 11148 
v.AddArg3(ptr, val, mem) 11149 return true 11150 } 11151 // match: (MOVLstore [i] {s} p x:(BSWAPL w) mem) 11152 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 11153 // result: (MOVBELstore [i] {s} p w mem) 11154 for { 11155 i := auxIntToInt32(v.AuxInt) 11156 s := auxToSym(v.Aux) 11157 p := v_0 11158 x := v_1 11159 if x.Op != OpAMD64BSWAPL { 11160 break 11161 } 11162 w := x.Args[0] 11163 mem := v_2 11164 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { 11165 break 11166 } 11167 v.reset(OpAMD64MOVBELstore) 11168 v.AuxInt = int32ToAuxInt(i) 11169 v.Aux = symToAux(s) 11170 v.AddArg3(p, w, mem) 11171 return true 11172 } 11173 return false 11174 } 11175 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { 11176 v_1 := v.Args[1] 11177 v_0 := v.Args[0] 11178 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 11179 // cond: ValAndOff(sc).canAdd32(off) 11180 // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) 11181 for { 11182 sc := auxIntToValAndOff(v.AuxInt) 11183 s := auxToSym(v.Aux) 11184 if v_0.Op != OpAMD64ADDQconst { 11185 break 11186 } 11187 off := auxIntToInt32(v_0.AuxInt) 11188 ptr := v_0.Args[0] 11189 mem := v_1 11190 if !(ValAndOff(sc).canAdd32(off)) { 11191 break 11192 } 11193 v.reset(OpAMD64MOVLstoreconst) 11194 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 11195 v.Aux = symToAux(s) 11196 v.AddArg2(ptr, mem) 11197 return true 11198 } 11199 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 11200 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) 11201 // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) 11202 for { 11203 sc := auxIntToValAndOff(v.AuxInt) 11204 sym1 := auxToSym(v.Aux) 11205 if v_0.Op != OpAMD64LEAQ { 11206 break 11207 } 11208 off := auxIntToInt32(v_0.AuxInt) 11209 sym2 := auxToSym(v_0.Aux) 11210 ptr := v_0.Args[0] 11211 mem := v_1 11212 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { 11213 break 11214 } 11215 v.reset(OpAMD64MOVLstoreconst) 11216 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 11217 v.Aux = symToAux(mergeSym(sym1, sym2)) 11218 v.AddArg2(ptr, mem) 11219 return true 11220 } 11221 return false 11222 } 11223 func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { 11224 v_1 := v.Args[1] 11225 v_0 := v.Args[0] 11226 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 11227 // cond: is32Bit(int64(off1)+int64(off2)) 11228 // result: (MOVOload [off1+off2] {sym} ptr mem) 11229 for { 11230 off1 := auxIntToInt32(v.AuxInt) 11231 sym := auxToSym(v.Aux) 11232 if v_0.Op != OpAMD64ADDQconst { 11233 break 11234 } 11235 off2 := auxIntToInt32(v_0.AuxInt) 11236 ptr := v_0.Args[0] 11237 mem := v_1 11238 if !(is32Bit(int64(off1) + int64(off2))) { 11239 break 11240 } 11241 v.reset(OpAMD64MOVOload) 11242 v.AuxInt = int32ToAuxInt(off1 + off2) 11243 v.Aux = symToAux(sym) 11244 v.AddArg2(ptr, mem) 11245 return true 11246 } 11247 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 11248 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 11249 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 11250 for { 11251 off1 := auxIntToInt32(v.AuxInt) 11252 sym1 := auxToSym(v.Aux) 11253 if v_0.Op != OpAMD64LEAQ { 11254 break 11255 } 11256 off2 := auxIntToInt32(v_0.AuxInt) 11257 sym2 := auxToSym(v_0.Aux) 11258 base := v_0.Args[0] 11259 mem := v_1 11260 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 11261 break 11262 } 11263 v.reset(OpAMD64MOVOload) 11264 v.AuxInt = 
int32ToAuxInt(off1 + off2) 11265 v.Aux = symToAux(mergeSym(sym1, sym2)) 11266 v.AddArg2(base, mem) 11267 return true 11268 } 11269 return false 11270 } 11271 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { 11272 v_2 := v.Args[2] 11273 v_1 := v.Args[1] 11274 v_0 := v.Args[0] 11275 b := v.Block 11276 config := b.Func.Config 11277 typ := &b.Func.Config.Types 11278 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 11279 // cond: is32Bit(int64(off1)+int64(off2)) 11280 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 11281 for { 11282 off1 := auxIntToInt32(v.AuxInt) 11283 sym := auxToSym(v.Aux) 11284 if v_0.Op != OpAMD64ADDQconst { 11285 break 11286 } 11287 off2 := auxIntToInt32(v_0.AuxInt) 11288 ptr := v_0.Args[0] 11289 val := v_1 11290 mem := v_2 11291 if !(is32Bit(int64(off1) + int64(off2))) { 11292 break 11293 } 11294 v.reset(OpAMD64MOVOstore) 11295 v.AuxInt = int32ToAuxInt(off1 + off2) 11296 v.Aux = symToAux(sym) 11297 v.AddArg3(ptr, val, mem) 11298 return true 11299 } 11300 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11301 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 11302 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11303 for { 11304 off1 := auxIntToInt32(v.AuxInt) 11305 sym1 := auxToSym(v.Aux) 11306 if v_0.Op != OpAMD64LEAQ { 11307 break 11308 } 11309 off2 := auxIntToInt32(v_0.AuxInt) 11310 sym2 := auxToSym(v_0.Aux) 11311 base := v_0.Args[0] 11312 val := v_1 11313 mem := v_2 11314 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 11315 break 11316 } 11317 v.reset(OpAMD64MOVOstore) 11318 v.AuxInt = int32ToAuxInt(off1 + off2) 11319 v.Aux = symToAux(mergeSym(sym1, sym2)) 11320 v.AddArg3(base, val, mem) 11321 return true 11322 } 11323 // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) 11324 // cond: symIsRO(srcSym) 11325 // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem)) 11326 for { 11327 dstOff := auxIntToInt32(v.AuxInt) 11328 dstSym := auxToSym(v.Aux) 11329 ptr := v_0 11330 if v_1.Op != OpAMD64MOVOload { 11331 break 11332 } 11333 srcOff := auxIntToInt32(v_1.AuxInt) 11334 srcSym := auxToSym(v_1.Aux) 11335 v_1_0 := v_1.Args[0] 11336 if v_1_0.Op != OpSB { 11337 break 11338 } 11339 mem := v_2 11340 if !(symIsRO(srcSym)) { 11341 break 11342 } 11343 v.reset(OpAMD64MOVQstore) 11344 v.AuxInt = int32ToAuxInt(dstOff + 8) 11345 v.Aux = symToAux(dstSym) 11346 v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) 11347 v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))) 11348 v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem) 11349 v1.AuxInt = int32ToAuxInt(dstOff) 11350 v1.Aux = symToAux(dstSym) 11351 v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) 11352 v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))) 11353 v1.AddArg3(ptr, v2, mem) 11354 v.AddArg3(ptr, v0, v1) 11355 return true 11356 } 11357 return false 11358 } 11359 func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool { 11360 v_1 := v.Args[1] 11361 v_0 := v.Args[0] 11362 // match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 11363 // cond: ValAndOff(sc).canAdd32(off) 11364 // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) 11365 for { 11366 sc := 
auxIntToValAndOff(v.AuxInt) 11367 s := auxToSym(v.Aux) 11368 if v_0.Op != OpAMD64ADDQconst { 11369 break 11370 } 11371 off := auxIntToInt32(v_0.AuxInt) 11372 ptr := v_0.Args[0] 11373 mem := v_1 11374 if !(ValAndOff(sc).canAdd32(off)) { 11375 break 11376 } 11377 v.reset(OpAMD64MOVOstoreconst) 11378 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 11379 v.Aux = symToAux(s) 11380 v.AddArg2(ptr, mem) 11381 return true 11382 } 11383 // match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 11384 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) 11385 // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) 11386 for { 11387 sc := auxIntToValAndOff(v.AuxInt) 11388 sym1 := auxToSym(v.Aux) 11389 if v_0.Op != OpAMD64LEAQ { 11390 break 11391 } 11392 off := auxIntToInt32(v_0.AuxInt) 11393 sym2 := auxToSym(v_0.Aux) 11394 ptr := v_0.Args[0] 11395 mem := v_1 11396 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { 11397 break 11398 } 11399 v.reset(OpAMD64MOVOstoreconst) 11400 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 11401 v.Aux = symToAux(mergeSym(sym1, sym2)) 11402 v.AddArg2(ptr, mem) 11403 return true 11404 } 11405 return false 11406 } 11407 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { 11408 v_1 := v.Args[1] 11409 v_0 := v.Args[0] 11410 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 11411 // cond: is32Bit(int64(off1)+int64(off2)) 11412 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 11413 for { 11414 off1 := auxIntToInt32(v.AuxInt) 11415 sym := auxToSym(v.Aux) 11416 if v_0.Op != OpAMD64ADDQconst { 11417 break 11418 } 11419 off2 := auxIntToInt32(v_0.AuxInt) 11420 ptr := v_0.Args[0] 11421 mem := v_1 11422 if !(is32Bit(int64(off1) + int64(off2))) { 11423 break 11424 } 11425 v.reset(OpAMD64MOVQatomicload) 11426 v.AuxInt = int32ToAuxInt(off1 + off2) 11427 v.Aux = symToAux(sym) 11428 v.AddArg2(ptr, mem) 11429 return true 11430 } 11431 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 11432 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 11433 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) 11434 for { 11435 off1 := auxIntToInt32(v.AuxInt) 11436 sym1 := auxToSym(v.Aux) 11437 if v_0.Op != OpAMD64LEAQ { 11438 break 11439 } 11440 off2 := auxIntToInt32(v_0.AuxInt) 11441 sym2 := auxToSym(v_0.Aux) 11442 ptr := v_0.Args[0] 11443 mem := v_1 11444 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 11445 break 11446 } 11447 v.reset(OpAMD64MOVQatomicload) 11448 v.AuxInt = int32ToAuxInt(off1 + off2) 11449 v.Aux = symToAux(mergeSym(sym1, sym2)) 11450 v.AddArg2(ptr, mem) 11451 return true 11452 } 11453 return false 11454 } 11455 func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool { 11456 v_0 := v.Args[0] 11457 b := v.Block 11458 // match: (MOVQf2i <t> (Arg <u> [off] {sym})) 11459 // cond: t.Size() == u.Size() 11460 // result: @b.Func.Entry (Arg <t> [off] {sym}) 11461 for { 11462 t := v.Type 11463 if v_0.Op != OpArg { 11464 break 11465 } 11466 u := v_0.Type 11467 off := auxIntToInt32(v_0.AuxInt) 11468 sym := auxToSym(v_0.Aux) 11469 if !(t.Size() == u.Size()) { 11470 break 11471 } 11472 b = b.Func.Entry 11473 v0 := b.NewValue0(v.Pos, OpArg, t) 11474 v.copyOf(v0) 11475 v0.AuxInt = int32ToAuxInt(off) 11476 v0.Aux = symToAux(sym) 11477 return true 11478 } 11479 return false 11480 } 11481 func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool { 11482 v_0 := v.Args[0] 11483 b := v.Block 11484 // match: 
(MOVQi2f <t> (Arg <u> [off] {sym})) 11485 // cond: t.Size() == u.Size() 11486 // result: @b.Func.Entry (Arg <t> [off] {sym}) 11487 for { 11488 t := v.Type 11489 if v_0.Op != OpArg { 11490 break 11491 } 11492 u := v_0.Type 11493 off := auxIntToInt32(v_0.AuxInt) 11494 sym := auxToSym(v_0.Aux) 11495 if !(t.Size() == u.Size()) { 11496 break 11497 } 11498 b = b.Func.Entry 11499 v0 := b.NewValue0(v.Pos, OpArg, t) 11500 v.copyOf(v0) 11501 v0.AuxInt = int32ToAuxInt(off) 11502 v0.Aux = symToAux(sym) 11503 return true 11504 } 11505 return false 11506 } 11507 func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { 11508 v_1 := v.Args[1] 11509 v_0 := v.Args[0] 11510 b := v.Block 11511 config := b.Func.Config 11512 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 11513 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 11514 // result: x 11515 for { 11516 off := auxIntToInt32(v.AuxInt) 11517 sym := auxToSym(v.Aux) 11518 ptr := v_0 11519 if v_1.Op != OpAMD64MOVQstore { 11520 break 11521 } 11522 off2 := auxIntToInt32(v_1.AuxInt) 11523 sym2 := auxToSym(v_1.Aux) 11524 x := v_1.Args[1] 11525 ptr2 := v_1.Args[0] 11526 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 11527 break 11528 } 11529 v.copyOf(x) 11530 return true 11531 } 11532 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 11533 // cond: is32Bit(int64(off1)+int64(off2)) 11534 // result: (MOVQload [off1+off2] {sym} ptr mem) 11535 for { 11536 off1 := auxIntToInt32(v.AuxInt) 11537 sym := auxToSym(v.Aux) 11538 if v_0.Op != OpAMD64ADDQconst { 11539 break 11540 } 11541 off2 := auxIntToInt32(v_0.AuxInt) 11542 ptr := v_0.Args[0] 11543 mem := v_1 11544 if !(is32Bit(int64(off1) + int64(off2))) { 11545 break 11546 } 11547 v.reset(OpAMD64MOVQload) 11548 v.AuxInt = int32ToAuxInt(off1 + off2) 11549 v.Aux = symToAux(sym) 11550 v.AddArg2(ptr, mem) 11551 return true 11552 } 11553 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 11554 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 11555 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 11556 for { 11557 off1 := auxIntToInt32(v.AuxInt) 11558 sym1 := auxToSym(v.Aux) 11559 if v_0.Op != OpAMD64LEAQ { 11560 break 11561 } 11562 off2 := auxIntToInt32(v_0.AuxInt) 11563 sym2 := auxToSym(v_0.Aux) 11564 base := v_0.Args[0] 11565 mem := v_1 11566 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 11567 break 11568 } 11569 v.reset(OpAMD64MOVQload) 11570 v.AuxInt = int32ToAuxInt(off1 + off2) 11571 v.Aux = symToAux(mergeSym(sym1, sym2)) 11572 v.AddArg2(base, mem) 11573 return true 11574 } 11575 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) 11576 // result: (MOVQf2i val) 11577 for { 11578 off := auxIntToInt32(v.AuxInt) 11579 sym := auxToSym(v.Aux) 11580 ptr := v_0 11581 if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { 11582 break 11583 } 11584 val := v_1.Args[1] 11585 if ptr != v_1.Args[0] { 11586 break 11587 } 11588 v.reset(OpAMD64MOVQf2i) 11589 v.AddArg(val) 11590 return true 11591 } 11592 // match: (MOVQload [off] {sym} (SB) _) 11593 // cond: symIsRO(sym) 11594 // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) 11595 for { 11596 off := auxIntToInt32(v.AuxInt) 11597 sym := auxToSym(v.Aux) 11598 if v_0.Op != OpSB || !(symIsRO(sym)) { 11599 break 11600 } 11601 v.reset(OpAMD64MOVQconst) 11602 v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))) 11603 return true 11604 } 
11605 return false 11606 } 11607 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { 11608 v_2 := v.Args[2] 11609 v_1 := v.Args[1] 11610 v_0 := v.Args[0] 11611 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 11612 // cond: is32Bit(int64(off1)+int64(off2)) 11613 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 11614 for { 11615 off1 := auxIntToInt32(v.AuxInt) 11616 sym := auxToSym(v.Aux) 11617 if v_0.Op != OpAMD64ADDQconst { 11618 break 11619 } 11620 off2 := auxIntToInt32(v_0.AuxInt) 11621 ptr := v_0.Args[0] 11622 val := v_1 11623 mem := v_2 11624 if !(is32Bit(int64(off1) + int64(off2))) { 11625 break 11626 } 11627 v.reset(OpAMD64MOVQstore) 11628 v.AuxInt = int32ToAuxInt(off1 + off2) 11629 v.Aux = symToAux(sym) 11630 v.AddArg3(ptr, val, mem) 11631 return true 11632 } 11633 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 11634 // cond: validVal(c) 11635 // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) 11636 for { 11637 off := auxIntToInt32(v.AuxInt) 11638 sym := auxToSym(v.Aux) 11639 ptr := v_0 11640 if v_1.Op != OpAMD64MOVQconst { 11641 break 11642 } 11643 c := auxIntToInt64(v_1.AuxInt) 11644 mem := v_2 11645 if !(validVal(c)) { 11646 break 11647 } 11648 v.reset(OpAMD64MOVQstoreconst) 11649 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11650 v.Aux = symToAux(sym) 11651 v.AddArg2(ptr, mem) 11652 return true 11653 } 11654 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 11655 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 11656 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 11657 for { 11658 off1 := auxIntToInt32(v.AuxInt) 11659 sym1 := auxToSym(v.Aux) 11660 if v_0.Op != OpAMD64LEAQ { 11661 break 11662 } 11663 off2 := auxIntToInt32(v_0.AuxInt) 11664 sym2 := auxToSym(v_0.Aux) 11665 base := v_0.Args[0] 11666 val := v_1 11667 mem := v_2 11668 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 11669 break 11670 } 11671 v.reset(OpAMD64MOVQstore) 11672 v.AuxInt = int32ToAuxInt(off1 + off2) 11673 v.Aux = symToAux(mergeSym(sym1, sym2)) 11674 v.AddArg3(base, val, mem) 11675 return true 11676 } 11677 // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem) 11678 // cond: y.Uses==1 && clobber(y) 11679 // result: (ADDQmodify [off] {sym} ptr x mem) 11680 for { 11681 off := auxIntToInt32(v.AuxInt) 11682 sym := auxToSym(v.Aux) 11683 ptr := v_0 11684 y := v_1 11685 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 11686 break 11687 } 11688 mem := y.Args[2] 11689 x := y.Args[0] 11690 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 11691 break 11692 } 11693 v.reset(OpAMD64ADDQmodify) 11694 v.AuxInt = int32ToAuxInt(off) 11695 v.Aux = symToAux(sym) 11696 v.AddArg3(ptr, x, mem) 11697 return true 11698 } 11699 // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) 11700 // cond: y.Uses==1 && clobber(y) 11701 // result: (ANDQmodify [off] {sym} ptr x mem) 11702 for { 11703 off := auxIntToInt32(v.AuxInt) 11704 sym := auxToSym(v.Aux) 11705 ptr := v_0 11706 y := v_1 11707 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 11708 break 11709 } 11710 mem := y.Args[2] 11711 x := y.Args[0] 11712 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 11713 break 11714 } 11715 v.reset(OpAMD64ANDQmodify) 11716 v.AuxInt = int32ToAuxInt(off) 11717 v.Aux = symToAux(sym) 11718 v.AddArg3(ptr, x, mem) 11719 return true 11720 } 
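// Editorial note (not generated): the ADDQload/ANDQload rules above and the
// ORQload/XORQload rules below fuse a load-op-store round trip into one
// read-modify-write instruction, e.g. (MOVQstore ptr (ADDQload x ptr mem) mem)
// becomes (ADDQmodify ptr x mem), i.e. `ADDQ x, (ptr)` at the machine level.
// The y.Uses == 1 condition makes the fusion safe: after the rewrite the
// intermediate value exists only in memory, so nothing else may observe it.
// A source-level shape that typically produces this pattern (illustrative):
//
//	func bump(p *int64, x int64) { *p += x } // one ADDQ x, (reg) on amd64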
11721 // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) 11722 // cond: y.Uses==1 && clobber(y) 11723 // result: (ORQmodify [off] {sym} ptr x mem) 11724 for { 11725 off := auxIntToInt32(v.AuxInt) 11726 sym := auxToSym(v.Aux) 11727 ptr := v_0 11728 y := v_1 11729 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 11730 break 11731 } 11732 mem := y.Args[2] 11733 x := y.Args[0] 11734 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 11735 break 11736 } 11737 v.reset(OpAMD64ORQmodify) 11738 v.AuxInt = int32ToAuxInt(off) 11739 v.Aux = symToAux(sym) 11740 v.AddArg3(ptr, x, mem) 11741 return true 11742 } 11743 // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem) 11744 // cond: y.Uses==1 && clobber(y) 11745 // result: (XORQmodify [off] {sym} ptr x mem) 11746 for { 11747 off := auxIntToInt32(v.AuxInt) 11748 sym := auxToSym(v.Aux) 11749 ptr := v_0 11750 y := v_1 11751 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { 11752 break 11753 } 11754 mem := y.Args[2] 11755 x := y.Args[0] 11756 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { 11757 break 11758 } 11759 v.reset(OpAMD64XORQmodify) 11760 v.AuxInt = int32ToAuxInt(off) 11761 v.Aux = symToAux(sym) 11762 v.AddArg3(ptr, x, mem) 11763 return true 11764 } 11765 // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) 11766 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 11767 // result: (ADDQmodify [off] {sym} ptr x mem) 11768 for { 11769 off := auxIntToInt32(v.AuxInt) 11770 sym := auxToSym(v.Aux) 11771 ptr := v_0 11772 y := v_1 11773 if y.Op != OpAMD64ADDQ { 11774 break 11775 } 11776 _ = y.Args[1] 11777 y_0 := y.Args[0] 11778 y_1 := y.Args[1] 11779 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 11780 l := y_0 11781 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11782 continue 11783 } 11784 mem := l.Args[1] 11785 if ptr != l.Args[0] { 11786 continue 11787 } 11788 x := y_1 11789 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 11790 continue 11791 } 11792 v.reset(OpAMD64ADDQmodify) 11793 v.AuxInt = int32ToAuxInt(off) 11794 v.Aux = symToAux(sym) 11795 v.AddArg3(ptr, x, mem) 11796 return true 11797 } 11798 break 11799 } 11800 // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem) 11801 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 11802 // result: (SUBQmodify [off] {sym} ptr x mem) 11803 for { 11804 off := auxIntToInt32(v.AuxInt) 11805 sym := auxToSym(v.Aux) 11806 ptr := v_0 11807 y := v_1 11808 if y.Op != OpAMD64SUBQ { 11809 break 11810 } 11811 x := y.Args[1] 11812 l := y.Args[0] 11813 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11814 break 11815 } 11816 mem := l.Args[1] 11817 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 11818 break 11819 } 11820 v.reset(OpAMD64SUBQmodify) 11821 v.AuxInt = int32ToAuxInt(off) 11822 v.Aux = symToAux(sym) 11823 v.AddArg3(ptr, x, mem) 11824 return true 11825 } 11826 // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) 11827 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 11828 // result: (ANDQmodify [off] {sym} ptr x mem) 11829 for { 11830 off := auxIntToInt32(v.AuxInt) 11831 sym := auxToSym(v.Aux) 11832 ptr := v_0 11833 y := v_1 11834 if y.Op != OpAMD64ANDQ { 11835 break 11836 } 11837 _ = y.Args[1] 11838 y_0 := 
y.Args[0] 11839 y_1 := y.Args[1] 11840 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 11841 l := y_0 11842 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11843 continue 11844 } 11845 mem := l.Args[1] 11846 if ptr != l.Args[0] { 11847 continue 11848 } 11849 x := y_1 11850 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 11851 continue 11852 } 11853 v.reset(OpAMD64ANDQmodify) 11854 v.AuxInt = int32ToAuxInt(off) 11855 v.Aux = symToAux(sym) 11856 v.AddArg3(ptr, x, mem) 11857 return true 11858 } 11859 break 11860 } 11861 // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem) 11862 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 11863 // result: (ORQmodify [off] {sym} ptr x mem) 11864 for { 11865 off := auxIntToInt32(v.AuxInt) 11866 sym := auxToSym(v.Aux) 11867 ptr := v_0 11868 y := v_1 11869 if y.Op != OpAMD64ORQ { 11870 break 11871 } 11872 _ = y.Args[1] 11873 y_0 := y.Args[0] 11874 y_1 := y.Args[1] 11875 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 11876 l := y_0 11877 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11878 continue 11879 } 11880 mem := l.Args[1] 11881 if ptr != l.Args[0] { 11882 continue 11883 } 11884 x := y_1 11885 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 11886 continue 11887 } 11888 v.reset(OpAMD64ORQmodify) 11889 v.AuxInt = int32ToAuxInt(off) 11890 v.Aux = symToAux(sym) 11891 v.AddArg3(ptr, x, mem) 11892 return true 11893 } 11894 break 11895 } 11896 // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem) 11897 // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) 11898 // result: (XORQmodify [off] {sym} ptr x mem) 11899 for { 11900 off := auxIntToInt32(v.AuxInt) 11901 sym := auxToSym(v.Aux) 11902 ptr := v_0 11903 y := v_1 11904 if y.Op != OpAMD64XORQ { 11905 break 11906 } 11907 _ = y.Args[1] 11908 y_0 := y.Args[0] 11909 y_1 := y.Args[1] 11910 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { 11911 l := y_0 11912 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11913 continue 11914 } 11915 mem := l.Args[1] 11916 if ptr != l.Args[0] { 11917 continue 11918 } 11919 x := y_1 11920 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { 11921 continue 11922 } 11923 v.reset(OpAMD64XORQmodify) 11924 v.AuxInt = int32ToAuxInt(off) 11925 v.Aux = symToAux(sym) 11926 v.AddArg3(ptr, x, mem) 11927 return true 11928 } 11929 break 11930 } 11931 // match: (MOVQstore {sym} [off] ptr x:(BTSQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) 11932 // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l) 11933 // result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 11934 for { 11935 off := auxIntToInt32(v.AuxInt) 11936 sym := auxToSym(v.Aux) 11937 ptr := v_0 11938 x := v_1 11939 if x.Op != OpAMD64BTSQconst { 11940 break 11941 } 11942 c := auxIntToInt8(x.AuxInt) 11943 l := x.Args[0] 11944 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11945 break 11946 } 11947 mem := l.Args[1] 11948 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) { 11949 break 11950 } 11951 v.reset(OpAMD64BTSQconstmodify) 11952 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11953 v.Aux = symToAux(sym) 11954 v.AddArg2(ptr, mem) 11955 return true 11956 } 11957 // match: (MOVQstore {sym} [off] ptr x:(BTRQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) 11958 // cond: x.Uses == 
1 && l.Uses == 1 && clobber(x, l) 11959 // result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 11960 for { 11961 off := auxIntToInt32(v.AuxInt) 11962 sym := auxToSym(v.Aux) 11963 ptr := v_0 11964 x := v_1 11965 if x.Op != OpAMD64BTRQconst { 11966 break 11967 } 11968 c := auxIntToInt8(x.AuxInt) 11969 l := x.Args[0] 11970 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11971 break 11972 } 11973 mem := l.Args[1] 11974 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) { 11975 break 11976 } 11977 v.reset(OpAMD64BTRQconstmodify) 11978 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 11979 v.Aux = symToAux(sym) 11980 v.AddArg2(ptr, mem) 11981 return true 11982 } 11983 // match: (MOVQstore {sym} [off] ptr x:(BTCQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) 11984 // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l) 11985 // result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 11986 for { 11987 off := auxIntToInt32(v.AuxInt) 11988 sym := auxToSym(v.Aux) 11989 ptr := v_0 11990 x := v_1 11991 if x.Op != OpAMD64BTCQconst { 11992 break 11993 } 11994 c := auxIntToInt8(x.AuxInt) 11995 l := x.Args[0] 11996 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 11997 break 11998 } 11999 mem := l.Args[1] 12000 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) { 12001 break 12002 } 12003 v.reset(OpAMD64BTCQconstmodify) 12004 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12005 v.Aux = symToAux(sym) 12006 v.AddArg2(ptr, mem) 12007 return true 12008 } 12009 // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 12010 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 12011 // result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 12012 for { 12013 off := auxIntToInt32(v.AuxInt) 12014 sym := auxToSym(v.Aux) 12015 ptr := v_0 12016 a := v_1 12017 if a.Op != OpAMD64ADDQconst { 12018 break 12019 } 12020 c := auxIntToInt32(a.AuxInt) 12021 l := a.Args[0] 12022 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12023 break 12024 } 12025 mem := l.Args[1] 12026 ptr2 := l.Args[0] 12027 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 12028 break 12029 } 12030 v.reset(OpAMD64ADDQconstmodify) 12031 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12032 v.Aux = symToAux(sym) 12033 v.AddArg2(ptr, mem) 12034 return true 12035 } 12036 // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 12037 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 12038 // result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 12039 for { 12040 off := auxIntToInt32(v.AuxInt) 12041 sym := auxToSym(v.Aux) 12042 ptr := v_0 12043 a := v_1 12044 if a.Op != OpAMD64ANDQconst { 12045 break 12046 } 12047 c := auxIntToInt32(a.AuxInt) 12048 l := a.Args[0] 12049 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12050 break 12051 } 12052 mem := l.Args[1] 12053 ptr2 := l.Args[0] 12054 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 12055 break 12056 } 12057 v.reset(OpAMD64ANDQconstmodify) 12058 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12059 v.Aux = symToAux(sym) 12060 v.AddArg2(ptr, mem) 12061 return true 12062 } 12063 
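// Editorial note (not generated): the BT{S,R,C}Qconstmodify and
// {ADD,AND}Qconstmodify rules above, together with the {OR,XOR}Qconstmodify
// rules below, fold a constant operation on a memory location into a single
// instruction such as `ADDQ $c, off(ptr)`. Both the immediate and the
// displacement must travel in the one AuxInt field, hence makeValAndOff in the
// results. Rough sketch of the packing, assuming the val-in-high/off-in-low
// layout that ValAndOff.Val and ValAndOff.Off read back; makeValAndOffSketch is
// a hypothetical name:
//
//	func makeValAndOffSketch(val, off int32) int64 {
//		return int64(val)<<32 | int64(uint32(off)) // val: bits 63..32, off: bits 31..0
//	}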
// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 12064 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 12065 // result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 12066 for { 12067 off := auxIntToInt32(v.AuxInt) 12068 sym := auxToSym(v.Aux) 12069 ptr := v_0 12070 a := v_1 12071 if a.Op != OpAMD64ORQconst { 12072 break 12073 } 12074 c := auxIntToInt32(a.AuxInt) 12075 l := a.Args[0] 12076 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12077 break 12078 } 12079 mem := l.Args[1] 12080 ptr2 := l.Args[0] 12081 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 12082 break 12083 } 12084 v.reset(OpAMD64ORQconstmodify) 12085 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12086 v.Aux = symToAux(sym) 12087 v.AddArg2(ptr, mem) 12088 return true 12089 } 12090 // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 12091 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) 12092 // result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) 12093 for { 12094 off := auxIntToInt32(v.AuxInt) 12095 sym := auxToSym(v.Aux) 12096 ptr := v_0 12097 a := v_1 12098 if a.Op != OpAMD64XORQconst { 12099 break 12100 } 12101 c := auxIntToInt32(a.AuxInt) 12102 l := a.Args[0] 12103 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 12104 break 12105 } 12106 mem := l.Args[1] 12107 ptr2 := l.Args[0] 12108 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { 12109 break 12110 } 12111 v.reset(OpAMD64XORQconstmodify) 12112 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) 12113 v.Aux = symToAux(sym) 12114 v.AddArg2(ptr, mem) 12115 return true 12116 } 12117 // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) 12118 // result: (MOVSDstore [off] {sym} ptr val mem) 12119 for { 12120 off := auxIntToInt32(v.AuxInt) 12121 sym := auxToSym(v.Aux) 12122 ptr := v_0 12123 if v_1.Op != OpAMD64MOVQf2i { 12124 break 12125 } 12126 val := v_1.Args[0] 12127 mem := v_2 12128 v.reset(OpAMD64MOVSDstore) 12129 v.AuxInt = int32ToAuxInt(off) 12130 v.Aux = symToAux(sym) 12131 v.AddArg3(ptr, val, mem) 12132 return true 12133 } 12134 // match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem) 12135 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 12136 // result: (MOVBEQstore [i] {s} p w mem) 12137 for { 12138 i := auxIntToInt32(v.AuxInt) 12139 s := auxToSym(v.Aux) 12140 p := v_0 12141 x := v_1 12142 if x.Op != OpAMD64BSWAPQ { 12143 break 12144 } 12145 w := x.Args[0] 12146 mem := v_2 12147 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { 12148 break 12149 } 12150 v.reset(OpAMD64MOVBEQstore) 12151 v.AuxInt = int32ToAuxInt(i) 12152 v.Aux = symToAux(s) 12153 v.AddArg3(p, w, mem) 12154 return true 12155 } 12156 return false 12157 } 12158 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { 12159 v_1 := v.Args[1] 12160 v_0 := v.Args[0] 12161 b := v.Block 12162 config := b.Func.Config 12163 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 12164 // cond: ValAndOff(sc).canAdd32(off) 12165 // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) 12166 for { 12167 sc := auxIntToValAndOff(v.AuxInt) 12168 s := auxToSym(v.Aux) 12169 if v_0.Op != OpAMD64ADDQconst { 12170 break 12171 } 12172 off := auxIntToInt32(v_0.AuxInt) 12173 ptr := v_0.Args[0] 12174 mem := v_1 12175 if !(ValAndOff(sc).canAdd32(off)) { 
12176 break 12177 } 12178 v.reset(OpAMD64MOVQstoreconst) 12179 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 12180 v.Aux = symToAux(s) 12181 v.AddArg2(ptr, mem) 12182 return true 12183 } 12184 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 12185 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) 12186 // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) 12187 for { 12188 sc := auxIntToValAndOff(v.AuxInt) 12189 sym1 := auxToSym(v.Aux) 12190 if v_0.Op != OpAMD64LEAQ { 12191 break 12192 } 12193 off := auxIntToInt32(v_0.AuxInt) 12194 sym2 := auxToSym(v_0.Aux) 12195 ptr := v_0.Args[0] 12196 mem := v_1 12197 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { 12198 break 12199 } 12200 v.reset(OpAMD64MOVQstoreconst) 12201 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 12202 v.Aux = symToAux(mergeSym(sym1, sym2)) 12203 v.AddArg2(ptr, mem) 12204 return true 12205 } 12206 // match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem)) 12207 // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x) 12208 // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem) 12209 for { 12210 c := auxIntToValAndOff(v.AuxInt) 12211 s := auxToSym(v.Aux) 12212 p1 := v_0 12213 x := v_1 12214 if x.Op != OpAMD64MOVQstoreconst { 12215 break 12216 } 12217 a := auxIntToValAndOff(x.AuxInt) 12218 if auxToSym(x.Aux) != s { 12219 break 12220 } 12221 mem := x.Args[1] 12222 p0 := x.Args[0] 12223 if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) { 12224 break 12225 } 12226 v.reset(OpAMD64MOVOstoreconst) 12227 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off())) 12228 v.Aux = symToAux(s) 12229 v.AddArg2(p0, mem) 12230 return true 12231 } 12232 // match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem)) 12233 // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x) 12234 // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem) 12235 for { 12236 a := auxIntToValAndOff(v.AuxInt) 12237 s := auxToSym(v.Aux) 12238 p0 := v_0 12239 x := v_1 12240 if x.Op != OpAMD64MOVQstoreconst { 12241 break 12242 } 12243 c := auxIntToValAndOff(x.AuxInt) 12244 if auxToSym(x.Aux) != s { 12245 break 12246 } 12247 mem := x.Args[1] 12248 p1 := x.Args[0] 12249 if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) { 12250 break 12251 } 12252 v.reset(OpAMD64MOVOstoreconst) 12253 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off())) 12254 v.Aux = symToAux(s) 12255 v.AddArg2(p0, mem) 12256 return true 12257 } 12258 return false 12259 } 12260 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { 12261 v_1 := v.Args[1] 12262 v_0 := v.Args[0] 12263 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 12264 // cond: is32Bit(int64(off1)+int64(off2)) 12265 // result: (MOVSDload [off1+off2] {sym} ptr mem) 12266 for { 12267 off1 := auxIntToInt32(v.AuxInt) 12268 sym := auxToSym(v.Aux) 12269 if v_0.Op != OpAMD64ADDQconst { 12270 break 12271 } 12272 off2 := auxIntToInt32(v_0.AuxInt) 12273 ptr := v_0.Args[0] 12274 mem := v_1 12275 if !(is32Bit(int64(off1) + int64(off2))) { 12276 break 12277 } 12278 
v.reset(OpAMD64MOVSDload) 12279 v.AuxInt = int32ToAuxInt(off1 + off2) 12280 v.Aux = symToAux(sym) 12281 v.AddArg2(ptr, mem) 12282 return true 12283 } 12284 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12285 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12286 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12287 for { 12288 off1 := auxIntToInt32(v.AuxInt) 12289 sym1 := auxToSym(v.Aux) 12290 if v_0.Op != OpAMD64LEAQ { 12291 break 12292 } 12293 off2 := auxIntToInt32(v_0.AuxInt) 12294 sym2 := auxToSym(v_0.Aux) 12295 base := v_0.Args[0] 12296 mem := v_1 12297 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12298 break 12299 } 12300 v.reset(OpAMD64MOVSDload) 12301 v.AuxInt = int32ToAuxInt(off1 + off2) 12302 v.Aux = symToAux(mergeSym(sym1, sym2)) 12303 v.AddArg2(base, mem) 12304 return true 12305 } 12306 // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) 12307 // result: (MOVQi2f val) 12308 for { 12309 off := auxIntToInt32(v.AuxInt) 12310 sym := auxToSym(v.Aux) 12311 ptr := v_0 12312 if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { 12313 break 12314 } 12315 val := v_1.Args[1] 12316 if ptr != v_1.Args[0] { 12317 break 12318 } 12319 v.reset(OpAMD64MOVQi2f) 12320 v.AddArg(val) 12321 return true 12322 } 12323 return false 12324 } 12325 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { 12326 v_2 := v.Args[2] 12327 v_1 := v.Args[1] 12328 v_0 := v.Args[0] 12329 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 12330 // cond: is32Bit(int64(off1)+int64(off2)) 12331 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 12332 for { 12333 off1 := auxIntToInt32(v.AuxInt) 12334 sym := auxToSym(v.Aux) 12335 if v_0.Op != OpAMD64ADDQconst { 12336 break 12337 } 12338 off2 := auxIntToInt32(v_0.AuxInt) 12339 ptr := v_0.Args[0] 12340 val := v_1 12341 mem := v_2 12342 if !(is32Bit(int64(off1) + int64(off2))) { 12343 break 12344 } 12345 v.reset(OpAMD64MOVSDstore) 12346 v.AuxInt = int32ToAuxInt(off1 + off2) 12347 v.Aux = symToAux(sym) 12348 v.AddArg3(ptr, val, mem) 12349 return true 12350 } 12351 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 12352 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12353 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 12354 for { 12355 off1 := auxIntToInt32(v.AuxInt) 12356 sym1 := auxToSym(v.Aux) 12357 if v_0.Op != OpAMD64LEAQ { 12358 break 12359 } 12360 off2 := auxIntToInt32(v_0.AuxInt) 12361 sym2 := auxToSym(v_0.Aux) 12362 base := v_0.Args[0] 12363 val := v_1 12364 mem := v_2 12365 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12366 break 12367 } 12368 v.reset(OpAMD64MOVSDstore) 12369 v.AuxInt = int32ToAuxInt(off1 + off2) 12370 v.Aux = symToAux(mergeSym(sym1, sym2)) 12371 v.AddArg3(base, val, mem) 12372 return true 12373 } 12374 // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) 12375 // result: (MOVQstore [off] {sym} ptr val mem) 12376 for { 12377 off := auxIntToInt32(v.AuxInt) 12378 sym := auxToSym(v.Aux) 12379 ptr := v_0 12380 if v_1.Op != OpAMD64MOVQi2f { 12381 break 12382 } 12383 val := v_1.Args[0] 12384 mem := v_2 12385 v.reset(OpAMD64MOVQstore) 12386 v.AuxInt = int32ToAuxInt(off) 12387 v.Aux = symToAux(sym) 12388 v.AddArg3(ptr, val, mem) 12389 return true 12390 } 12391 return false 12392 } 12393 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { 12394 v_1 := v.Args[1] 12395 v_0 := v.Args[0] 12396 // match: 
(MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 12397 // cond: is32Bit(int64(off1)+int64(off2)) 12398 // result: (MOVSSload [off1+off2] {sym} ptr mem) 12399 for { 12400 off1 := auxIntToInt32(v.AuxInt) 12401 sym := auxToSym(v.Aux) 12402 if v_0.Op != OpAMD64ADDQconst { 12403 break 12404 } 12405 off2 := auxIntToInt32(v_0.AuxInt) 12406 ptr := v_0.Args[0] 12407 mem := v_1 12408 if !(is32Bit(int64(off1) + int64(off2))) { 12409 break 12410 } 12411 v.reset(OpAMD64MOVSSload) 12412 v.AuxInt = int32ToAuxInt(off1 + off2) 12413 v.Aux = symToAux(sym) 12414 v.AddArg2(ptr, mem) 12415 return true 12416 } 12417 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12418 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12419 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12420 for { 12421 off1 := auxIntToInt32(v.AuxInt) 12422 sym1 := auxToSym(v.Aux) 12423 if v_0.Op != OpAMD64LEAQ { 12424 break 12425 } 12426 off2 := auxIntToInt32(v_0.AuxInt) 12427 sym2 := auxToSym(v_0.Aux) 12428 base := v_0.Args[0] 12429 mem := v_1 12430 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12431 break 12432 } 12433 v.reset(OpAMD64MOVSSload) 12434 v.AuxInt = int32ToAuxInt(off1 + off2) 12435 v.Aux = symToAux(mergeSym(sym1, sym2)) 12436 v.AddArg2(base, mem) 12437 return true 12438 } 12439 // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) 12440 // result: (MOVLi2f val) 12441 for { 12442 off := auxIntToInt32(v.AuxInt) 12443 sym := auxToSym(v.Aux) 12444 ptr := v_0 12445 if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { 12446 break 12447 } 12448 val := v_1.Args[1] 12449 if ptr != v_1.Args[0] { 12450 break 12451 } 12452 v.reset(OpAMD64MOVLi2f) 12453 v.AddArg(val) 12454 return true 12455 } 12456 return false 12457 } 12458 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { 12459 v_2 := v.Args[2] 12460 v_1 := v.Args[1] 12461 v_0 := v.Args[0] 12462 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 12463 // cond: is32Bit(int64(off1)+int64(off2)) 12464 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 12465 for { 12466 off1 := auxIntToInt32(v.AuxInt) 12467 sym := auxToSym(v.Aux) 12468 if v_0.Op != OpAMD64ADDQconst { 12469 break 12470 } 12471 off2 := auxIntToInt32(v_0.AuxInt) 12472 ptr := v_0.Args[0] 12473 val := v_1 12474 mem := v_2 12475 if !(is32Bit(int64(off1) + int64(off2))) { 12476 break 12477 } 12478 v.reset(OpAMD64MOVSSstore) 12479 v.AuxInt = int32ToAuxInt(off1 + off2) 12480 v.Aux = symToAux(sym) 12481 v.AddArg3(ptr, val, mem) 12482 return true 12483 } 12484 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 12485 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12486 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 12487 for { 12488 off1 := auxIntToInt32(v.AuxInt) 12489 sym1 := auxToSym(v.Aux) 12490 if v_0.Op != OpAMD64LEAQ { 12491 break 12492 } 12493 off2 := auxIntToInt32(v_0.AuxInt) 12494 sym2 := auxToSym(v_0.Aux) 12495 base := v_0.Args[0] 12496 val := v_1 12497 mem := v_2 12498 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12499 break 12500 } 12501 v.reset(OpAMD64MOVSSstore) 12502 v.AuxInt = int32ToAuxInt(off1 + off2) 12503 v.Aux = symToAux(mergeSym(sym1, sym2)) 12504 v.AddArg3(base, val, mem) 12505 return true 12506 } 12507 // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) 12508 // result: (MOVLstore [off] {sym} ptr val mem) 12509 for { 12510 off := auxIntToInt32(v.AuxInt) 
12511 sym := auxToSym(v.Aux) 12512 ptr := v_0 12513 if v_1.Op != OpAMD64MOVLi2f { 12514 break 12515 } 12516 val := v_1.Args[0] 12517 mem := v_2 12518 v.reset(OpAMD64MOVLstore) 12519 v.AuxInt = int32ToAuxInt(off) 12520 v.Aux = symToAux(sym) 12521 v.AddArg3(ptr, val, mem) 12522 return true 12523 } 12524 return false 12525 } 12526 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { 12527 v_0 := v.Args[0] 12528 b := v.Block 12529 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 12530 // cond: x.Uses == 1 && clobber(x) 12531 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 12532 for { 12533 x := v_0 12534 if x.Op != OpAMD64MOVWload { 12535 break 12536 } 12537 off := auxIntToInt32(x.AuxInt) 12538 sym := auxToSym(x.Aux) 12539 mem := x.Args[1] 12540 ptr := x.Args[0] 12541 if !(x.Uses == 1 && clobber(x)) { 12542 break 12543 } 12544 b = x.Block 12545 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 12546 v.copyOf(v0) 12547 v0.AuxInt = int32ToAuxInt(off) 12548 v0.Aux = symToAux(sym) 12549 v0.AddArg2(ptr, mem) 12550 return true 12551 } 12552 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 12553 // cond: x.Uses == 1 && clobber(x) 12554 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 12555 for { 12556 x := v_0 12557 if x.Op != OpAMD64MOVLload { 12558 break 12559 } 12560 off := auxIntToInt32(x.AuxInt) 12561 sym := auxToSym(x.Aux) 12562 mem := x.Args[1] 12563 ptr := x.Args[0] 12564 if !(x.Uses == 1 && clobber(x)) { 12565 break 12566 } 12567 b = x.Block 12568 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 12569 v.copyOf(v0) 12570 v0.AuxInt = int32ToAuxInt(off) 12571 v0.Aux = symToAux(sym) 12572 v0.AddArg2(ptr, mem) 12573 return true 12574 } 12575 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 12576 // cond: x.Uses == 1 && clobber(x) 12577 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 12578 for { 12579 x := v_0 12580 if x.Op != OpAMD64MOVQload { 12581 break 12582 } 12583 off := auxIntToInt32(x.AuxInt) 12584 sym := auxToSym(x.Aux) 12585 mem := x.Args[1] 12586 ptr := x.Args[0] 12587 if !(x.Uses == 1 && clobber(x)) { 12588 break 12589 } 12590 b = x.Block 12591 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 12592 v.copyOf(v0) 12593 v0.AuxInt = int32ToAuxInt(off) 12594 v0.Aux = symToAux(sym) 12595 v0.AddArg2(ptr, mem) 12596 return true 12597 } 12598 // match: (MOVWQSX (ANDLconst [c] x)) 12599 // cond: c & 0x8000 == 0 12600 // result: (ANDLconst [c & 0x7fff] x) 12601 for { 12602 if v_0.Op != OpAMD64ANDLconst { 12603 break 12604 } 12605 c := auxIntToInt32(v_0.AuxInt) 12606 x := v_0.Args[0] 12607 if !(c&0x8000 == 0) { 12608 break 12609 } 12610 v.reset(OpAMD64ANDLconst) 12611 v.AuxInt = int32ToAuxInt(c & 0x7fff) 12612 v.AddArg(x) 12613 return true 12614 } 12615 // match: (MOVWQSX (MOVWQSX x)) 12616 // result: (MOVWQSX x) 12617 for { 12618 if v_0.Op != OpAMD64MOVWQSX { 12619 break 12620 } 12621 x := v_0.Args[0] 12622 v.reset(OpAMD64MOVWQSX) 12623 v.AddArg(x) 12624 return true 12625 } 12626 // match: (MOVWQSX (MOVBQSX x)) 12627 // result: (MOVBQSX x) 12628 for { 12629 if v_0.Op != OpAMD64MOVBQSX { 12630 break 12631 } 12632 x := v_0.Args[0] 12633 v.reset(OpAMD64MOVBQSX) 12634 v.AddArg(x) 12635 return true 12636 } 12637 return false 12638 } 12639 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { 12640 v_1 := v.Args[1] 12641 v_0 := v.Args[0] 12642 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12643 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12644 // result: (MOVWQSX x) 12645 for { 12646 
off := auxIntToInt32(v.AuxInt) 12647 sym := auxToSym(v.Aux) 12648 ptr := v_0 12649 if v_1.Op != OpAMD64MOVWstore { 12650 break 12651 } 12652 off2 := auxIntToInt32(v_1.AuxInt) 12653 sym2 := auxToSym(v_1.Aux) 12654 x := v_1.Args[1] 12655 ptr2 := v_1.Args[0] 12656 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12657 break 12658 } 12659 v.reset(OpAMD64MOVWQSX) 12660 v.AddArg(x) 12661 return true 12662 } 12663 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12664 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12665 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12666 for { 12667 off1 := auxIntToInt32(v.AuxInt) 12668 sym1 := auxToSym(v.Aux) 12669 if v_0.Op != OpAMD64LEAQ { 12670 break 12671 } 12672 off2 := auxIntToInt32(v_0.AuxInt) 12673 sym2 := auxToSym(v_0.Aux) 12674 base := v_0.Args[0] 12675 mem := v_1 12676 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12677 break 12678 } 12679 v.reset(OpAMD64MOVWQSXload) 12680 v.AuxInt = int32ToAuxInt(off1 + off2) 12681 v.Aux = symToAux(mergeSym(sym1, sym2)) 12682 v.AddArg2(base, mem) 12683 return true 12684 } 12685 return false 12686 } 12687 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { 12688 v_0 := v.Args[0] 12689 b := v.Block 12690 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 12691 // cond: x.Uses == 1 && clobber(x) 12692 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12693 for { 12694 x := v_0 12695 if x.Op != OpAMD64MOVWload { 12696 break 12697 } 12698 off := auxIntToInt32(x.AuxInt) 12699 sym := auxToSym(x.Aux) 12700 mem := x.Args[1] 12701 ptr := x.Args[0] 12702 if !(x.Uses == 1 && clobber(x)) { 12703 break 12704 } 12705 b = x.Block 12706 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 12707 v.copyOf(v0) 12708 v0.AuxInt = int32ToAuxInt(off) 12709 v0.Aux = symToAux(sym) 12710 v0.AddArg2(ptr, mem) 12711 return true 12712 } 12713 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 12714 // cond: x.Uses == 1 && clobber(x) 12715 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12716 for { 12717 x := v_0 12718 if x.Op != OpAMD64MOVLload { 12719 break 12720 } 12721 off := auxIntToInt32(x.AuxInt) 12722 sym := auxToSym(x.Aux) 12723 mem := x.Args[1] 12724 ptr := x.Args[0] 12725 if !(x.Uses == 1 && clobber(x)) { 12726 break 12727 } 12728 b = x.Block 12729 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 12730 v.copyOf(v0) 12731 v0.AuxInt = int32ToAuxInt(off) 12732 v0.Aux = symToAux(sym) 12733 v0.AddArg2(ptr, mem) 12734 return true 12735 } 12736 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 12737 // cond: x.Uses == 1 && clobber(x) 12738 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 12739 for { 12740 x := v_0 12741 if x.Op != OpAMD64MOVQload { 12742 break 12743 } 12744 off := auxIntToInt32(x.AuxInt) 12745 sym := auxToSym(x.Aux) 12746 mem := x.Args[1] 12747 ptr := x.Args[0] 12748 if !(x.Uses == 1 && clobber(x)) { 12749 break 12750 } 12751 b = x.Block 12752 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 12753 v.copyOf(v0) 12754 v0.AuxInt = int32ToAuxInt(off) 12755 v0.Aux = symToAux(sym) 12756 v0.AddArg2(ptr, mem) 12757 return true 12758 } 12759 // match: (MOVWQZX x) 12760 // cond: zeroUpper48Bits(x,3) 12761 // result: x 12762 for { 12763 x := v_0 12764 if !(zeroUpper48Bits(x, 3)) { 12765 break 12766 } 12767 v.copyOf(x) 12768 return true 12769 } 12770 // match: (MOVWQZX (ANDLconst [c] x)) 12771 // result: (ANDLconst [c & 0xffff] x) 12772 for { 12773 if v_0.Op != OpAMD64ANDLconst { 12774 break 12775 } 
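// Editorial note (not generated): masking with a 16-bit constant already clears
// bits 16..31, so the zero-extension is redundant and the ANDLconst alone
// suffices (its mask is trimmed to 0xffff for good measure). Compare the
// MOVWQSX (ANDLconst) rule earlier, which additionally requires c&0x8000 == 0
// so that sign- and zero-extension of the masked value agree.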
12776 c := auxIntToInt32(v_0.AuxInt) 12777 x := v_0.Args[0] 12778 v.reset(OpAMD64ANDLconst) 12779 v.AuxInt = int32ToAuxInt(c & 0xffff) 12780 v.AddArg(x) 12781 return true 12782 } 12783 // match: (MOVWQZX (MOVWQZX x)) 12784 // result: (MOVWQZX x) 12785 for { 12786 if v_0.Op != OpAMD64MOVWQZX { 12787 break 12788 } 12789 x := v_0.Args[0] 12790 v.reset(OpAMD64MOVWQZX) 12791 v.AddArg(x) 12792 return true 12793 } 12794 // match: (MOVWQZX (MOVBQZX x)) 12795 // result: (MOVBQZX x) 12796 for { 12797 if v_0.Op != OpAMD64MOVBQZX { 12798 break 12799 } 12800 x := v_0.Args[0] 12801 v.reset(OpAMD64MOVBQZX) 12802 v.AddArg(x) 12803 return true 12804 } 12805 return false 12806 } 12807 func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { 12808 v_1 := v.Args[1] 12809 v_0 := v.Args[0] 12810 b := v.Block 12811 config := b.Func.Config 12812 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12813 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12814 // result: (MOVWQZX x) 12815 for { 12816 off := auxIntToInt32(v.AuxInt) 12817 sym := auxToSym(v.Aux) 12818 ptr := v_0 12819 if v_1.Op != OpAMD64MOVWstore { 12820 break 12821 } 12822 off2 := auxIntToInt32(v_1.AuxInt) 12823 sym2 := auxToSym(v_1.Aux) 12824 x := v_1.Args[1] 12825 ptr2 := v_1.Args[0] 12826 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12827 break 12828 } 12829 v.reset(OpAMD64MOVWQZX) 12830 v.AddArg(x) 12831 return true 12832 } 12833 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 12834 // cond: is32Bit(int64(off1)+int64(off2)) 12835 // result: (MOVWload [off1+off2] {sym} ptr mem) 12836 for { 12837 off1 := auxIntToInt32(v.AuxInt) 12838 sym := auxToSym(v.Aux) 12839 if v_0.Op != OpAMD64ADDQconst { 12840 break 12841 } 12842 off2 := auxIntToInt32(v_0.AuxInt) 12843 ptr := v_0.Args[0] 12844 mem := v_1 12845 if !(is32Bit(int64(off1) + int64(off2))) { 12846 break 12847 } 12848 v.reset(OpAMD64MOVWload) 12849 v.AuxInt = int32ToAuxInt(off1 + off2) 12850 v.Aux = symToAux(sym) 12851 v.AddArg2(ptr, mem) 12852 return true 12853 } 12854 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12855 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12856 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12857 for { 12858 off1 := auxIntToInt32(v.AuxInt) 12859 sym1 := auxToSym(v.Aux) 12860 if v_0.Op != OpAMD64LEAQ { 12861 break 12862 } 12863 off2 := auxIntToInt32(v_0.AuxInt) 12864 sym2 := auxToSym(v_0.Aux) 12865 base := v_0.Args[0] 12866 mem := v_1 12867 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 12868 break 12869 } 12870 v.reset(OpAMD64MOVWload) 12871 v.AuxInt = int32ToAuxInt(off1 + off2) 12872 v.Aux = symToAux(mergeSym(sym1, sym2)) 12873 v.AddArg2(base, mem) 12874 return true 12875 } 12876 // match: (MOVWload [off] {sym} (SB) _) 12877 // cond: symIsRO(sym) 12878 // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) 12879 for { 12880 off := auxIntToInt32(v.AuxInt) 12881 sym := auxToSym(v.Aux) 12882 if v_0.Op != OpSB || !(symIsRO(sym)) { 12883 break 12884 } 12885 v.reset(OpAMD64MOVLconst) 12886 v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) 12887 return true 12888 } 12889 return false 12890 } 12891 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { 12892 v_2 := v.Args[2] 12893 v_1 := v.Args[1] 12894 v_0 := v.Args[0] 12895 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 12896 // result: (MOVWstore [off] {sym} ptr x mem) 12897 for { 12898 off := auxIntToInt32(v.AuxInt) 
12899 sym := auxToSym(v.Aux) 12900 ptr := v_0 12901 if v_1.Op != OpAMD64MOVWQSX { 12902 break 12903 } 12904 x := v_1.Args[0] 12905 mem := v_2 12906 v.reset(OpAMD64MOVWstore) 12907 v.AuxInt = int32ToAuxInt(off) 12908 v.Aux = symToAux(sym) 12909 v.AddArg3(ptr, x, mem) 12910 return true 12911 } 12912 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 12913 // result: (MOVWstore [off] {sym} ptr x mem) 12914 for { 12915 off := auxIntToInt32(v.AuxInt) 12916 sym := auxToSym(v.Aux) 12917 ptr := v_0 12918 if v_1.Op != OpAMD64MOVWQZX { 12919 break 12920 } 12921 x := v_1.Args[0] 12922 mem := v_2 12923 v.reset(OpAMD64MOVWstore) 12924 v.AuxInt = int32ToAuxInt(off) 12925 v.Aux = symToAux(sym) 12926 v.AddArg3(ptr, x, mem) 12927 return true 12928 } 12929 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 12930 // cond: is32Bit(int64(off1)+int64(off2)) 12931 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 12932 for { 12933 off1 := auxIntToInt32(v.AuxInt) 12934 sym := auxToSym(v.Aux) 12935 if v_0.Op != OpAMD64ADDQconst { 12936 break 12937 } 12938 off2 := auxIntToInt32(v_0.AuxInt) 12939 ptr := v_0.Args[0] 12940 val := v_1 12941 mem := v_2 12942 if !(is32Bit(int64(off1) + int64(off2))) { 12943 break 12944 } 12945 v.reset(OpAMD64MOVWstore) 12946 v.AuxInt = int32ToAuxInt(off1 + off2) 12947 v.Aux = symToAux(sym) 12948 v.AddArg3(ptr, val, mem) 12949 return true 12950 } 12951 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 12952 // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) 12953 for { 12954 off := auxIntToInt32(v.AuxInt) 12955 sym := auxToSym(v.Aux) 12956 ptr := v_0 12957 if v_1.Op != OpAMD64MOVLconst { 12958 break 12959 } 12960 c := auxIntToInt32(v_1.AuxInt) 12961 mem := v_2 12962 v.reset(OpAMD64MOVWstoreconst) 12963 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) 12964 v.Aux = symToAux(sym) 12965 v.AddArg2(ptr, mem) 12966 return true 12967 } 12968 // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) 12969 // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) 12970 for { 12971 off := auxIntToInt32(v.AuxInt) 12972 sym := auxToSym(v.Aux) 12973 ptr := v_0 12974 if v_1.Op != OpAMD64MOVQconst { 12975 break 12976 } 12977 c := auxIntToInt64(v_1.AuxInt) 12978 mem := v_2 12979 v.reset(OpAMD64MOVWstoreconst) 12980 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) 12981 v.Aux = symToAux(sym) 12982 v.AddArg2(ptr, mem) 12983 return true 12984 } 12985 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 12986 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 12987 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 12988 for { 12989 off1 := auxIntToInt32(v.AuxInt) 12990 sym1 := auxToSym(v.Aux) 12991 if v_0.Op != OpAMD64LEAQ { 12992 break 12993 } 12994 off2 := auxIntToInt32(v_0.AuxInt) 12995 sym2 := auxToSym(v_0.Aux) 12996 base := v_0.Args[0] 12997 val := v_1 12998 mem := v_2 12999 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 13000 break 13001 } 13002 v.reset(OpAMD64MOVWstore) 13003 v.AuxInt = int32ToAuxInt(off1 + off2) 13004 v.Aux = symToAux(mergeSym(sym1, sym2)) 13005 v.AddArg3(base, val, mem) 13006 return true 13007 } 13008 // match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem) 13009 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 13010 // result: (MOVBEWstore [i] {s} p w mem) 13011 for { 13012 i := auxIntToInt32(v.AuxInt) 13013 s := auxToSym(v.Aux) 13014 p := v_0 13015 x := v_1 13016 if x.Op != OpAMD64ROLWconst || 
auxIntToInt8(x.AuxInt) != 8 { 13017 break 13018 } 13019 w := x.Args[0] 13020 mem := v_2 13021 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { 13022 break 13023 } 13024 v.reset(OpAMD64MOVBEWstore) 13025 v.AuxInt = int32ToAuxInt(i) 13026 v.Aux = symToAux(s) 13027 v.AddArg3(p, w, mem) 13028 return true 13029 } 13030 return false 13031 } 13032 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { 13033 v_1 := v.Args[1] 13034 v_0 := v.Args[0] 13035 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 13036 // cond: ValAndOff(sc).canAdd32(off) 13037 // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) 13038 for { 13039 sc := auxIntToValAndOff(v.AuxInt) 13040 s := auxToSym(v.Aux) 13041 if v_0.Op != OpAMD64ADDQconst { 13042 break 13043 } 13044 off := auxIntToInt32(v_0.AuxInt) 13045 ptr := v_0.Args[0] 13046 mem := v_1 13047 if !(ValAndOff(sc).canAdd32(off)) { 13048 break 13049 } 13050 v.reset(OpAMD64MOVWstoreconst) 13051 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 13052 v.Aux = symToAux(s) 13053 v.AddArg2(ptr, mem) 13054 return true 13055 } 13056 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 13057 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) 13058 // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) 13059 for { 13060 sc := auxIntToValAndOff(v.AuxInt) 13061 sym1 := auxToSym(v.Aux) 13062 if v_0.Op != OpAMD64LEAQ { 13063 break 13064 } 13065 off := auxIntToInt32(v_0.AuxInt) 13066 sym2 := auxToSym(v_0.Aux) 13067 ptr := v_0.Args[0] 13068 mem := v_1 13069 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { 13070 break 13071 } 13072 v.reset(OpAMD64MOVWstoreconst) 13073 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) 13074 v.Aux = symToAux(mergeSym(sym1, sym2)) 13075 v.AddArg2(ptr, mem) 13076 return true 13077 } 13078 return false 13079 } 13080 func rewriteValueAMD64_OpAMD64MULL(v *Value) bool { 13081 v_1 := v.Args[1] 13082 v_0 := v.Args[0] 13083 // match: (MULL x (MOVLconst [c])) 13084 // result: (MULLconst [c] x) 13085 for { 13086 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 13087 x := v_0 13088 if v_1.Op != OpAMD64MOVLconst { 13089 continue 13090 } 13091 c := auxIntToInt32(v_1.AuxInt) 13092 v.reset(OpAMD64MULLconst) 13093 v.AuxInt = int32ToAuxInt(c) 13094 v.AddArg(x) 13095 return true 13096 } 13097 break 13098 } 13099 return false 13100 } 13101 func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { 13102 v_0 := v.Args[0] 13103 b := v.Block 13104 // match: (MULLconst [c] (MULLconst [d] x)) 13105 // result: (MULLconst [c * d] x) 13106 for { 13107 c := auxIntToInt32(v.AuxInt) 13108 if v_0.Op != OpAMD64MULLconst { 13109 break 13110 } 13111 d := auxIntToInt32(v_0.AuxInt) 13112 x := v_0.Args[0] 13113 v.reset(OpAMD64MULLconst) 13114 v.AuxInt = int32ToAuxInt(c * d) 13115 v.AddArg(x) 13116 return true 13117 } 13118 // match: (MULLconst [-9] x) 13119 // result: (NEGL (LEAL8 <v.Type> x x)) 13120 for { 13121 if auxIntToInt32(v.AuxInt) != -9 { 13122 break 13123 } 13124 x := v_0 13125 v.reset(OpAMD64NEGL) 13126 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 13127 v0.AddArg2(x, x) 13128 v.AddArg(v0) 13129 return true 13130 } 13131 // match: (MULLconst [-5] x) 13132 // result: (NEGL (LEAL4 <v.Type> x x)) 13133 for { 13134 if auxIntToInt32(v.AuxInt) != -5 { 13135 break 13136 } 13137 x := v_0 13138 v.reset(OpAMD64NEGL) 13139 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 13140 v0.AddArg2(x, x) 13141 v.AddArg(v0) 13142 return true 13143 } 13144 // match: 
(MULLconst [-3] x) 13145 // result: (NEGL (LEAL2 <v.Type> x x)) 13146 for { 13147 if auxIntToInt32(v.AuxInt) != -3 { 13148 break 13149 } 13150 x := v_0 13151 v.reset(OpAMD64NEGL) 13152 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 13153 v0.AddArg2(x, x) 13154 v.AddArg(v0) 13155 return true 13156 } 13157 // match: (MULLconst [-1] x) 13158 // result: (NEGL x) 13159 for { 13160 if auxIntToInt32(v.AuxInt) != -1 { 13161 break 13162 } 13163 x := v_0 13164 v.reset(OpAMD64NEGL) 13165 v.AddArg(x) 13166 return true 13167 } 13168 // match: (MULLconst [ 0] _) 13169 // result: (MOVLconst [0]) 13170 for { 13171 if auxIntToInt32(v.AuxInt) != 0 { 13172 break 13173 } 13174 v.reset(OpAMD64MOVLconst) 13175 v.AuxInt = int32ToAuxInt(0) 13176 return true 13177 } 13178 // match: (MULLconst [ 1] x) 13179 // result: x 13180 for { 13181 if auxIntToInt32(v.AuxInt) != 1 { 13182 break 13183 } 13184 x := v_0 13185 v.copyOf(x) 13186 return true 13187 } 13188 // match: (MULLconst [ 3] x) 13189 // result: (LEAL2 x x) 13190 for { 13191 if auxIntToInt32(v.AuxInt) != 3 { 13192 break 13193 } 13194 x := v_0 13195 v.reset(OpAMD64LEAL2) 13196 v.AddArg2(x, x) 13197 return true 13198 } 13199 // match: (MULLconst [ 5] x) 13200 // result: (LEAL4 x x) 13201 for { 13202 if auxIntToInt32(v.AuxInt) != 5 { 13203 break 13204 } 13205 x := v_0 13206 v.reset(OpAMD64LEAL4) 13207 v.AddArg2(x, x) 13208 return true 13209 } 13210 // match: (MULLconst [ 7] x) 13211 // result: (LEAL2 x (LEAL2 <v.Type> x x)) 13212 for { 13213 if auxIntToInt32(v.AuxInt) != 7 { 13214 break 13215 } 13216 x := v_0 13217 v.reset(OpAMD64LEAL2) 13218 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 13219 v0.AddArg2(x, x) 13220 v.AddArg2(x, v0) 13221 return true 13222 } 13223 // match: (MULLconst [ 9] x) 13224 // result: (LEAL8 x x) 13225 for { 13226 if auxIntToInt32(v.AuxInt) != 9 { 13227 break 13228 } 13229 x := v_0 13230 v.reset(OpAMD64LEAL8) 13231 v.AddArg2(x, x) 13232 return true 13233 } 13234 // match: (MULLconst [11] x) 13235 // result: (LEAL2 x (LEAL4 <v.Type> x x)) 13236 for { 13237 if auxIntToInt32(v.AuxInt) != 11 { 13238 break 13239 } 13240 x := v_0 13241 v.reset(OpAMD64LEAL2) 13242 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 13243 v0.AddArg2(x, x) 13244 v.AddArg2(x, v0) 13245 return true 13246 } 13247 // match: (MULLconst [13] x) 13248 // result: (LEAL4 x (LEAL2 <v.Type> x x)) 13249 for { 13250 if auxIntToInt32(v.AuxInt) != 13 { 13251 break 13252 } 13253 x := v_0 13254 v.reset(OpAMD64LEAL4) 13255 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 13256 v0.AddArg2(x, x) 13257 v.AddArg2(x, v0) 13258 return true 13259 } 13260 // match: (MULLconst [19] x) 13261 // result: (LEAL2 x (LEAL8 <v.Type> x x)) 13262 for { 13263 if auxIntToInt32(v.AuxInt) != 19 { 13264 break 13265 } 13266 x := v_0 13267 v.reset(OpAMD64LEAL2) 13268 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 13269 v0.AddArg2(x, x) 13270 v.AddArg2(x, v0) 13271 return true 13272 } 13273 // match: (MULLconst [21] x) 13274 // result: (LEAL4 x (LEAL4 <v.Type> x x)) 13275 for { 13276 if auxIntToInt32(v.AuxInt) != 21 { 13277 break 13278 } 13279 x := v_0 13280 v.reset(OpAMD64LEAL4) 13281 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 13282 v0.AddArg2(x, x) 13283 v.AddArg2(x, v0) 13284 return true 13285 } 13286 // match: (MULLconst [25] x) 13287 // result: (LEAL8 x (LEAL2 <v.Type> x x)) 13288 for { 13289 if auxIntToInt32(v.AuxInt) != 25 { 13290 break 13291 } 13292 x := v_0 13293 v.reset(OpAMD64LEAL8) 13294 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 13295 v0.AddArg2(x, x) 13296 v.AddArg2(x, v0) 13297 return true 
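// Editorial note (not generated): the small-constant rules in this function
// lower MULLconst to LEA address arithmetic, using the identity that
// (LEALk x y) computes x + k*y for k in {1,2,4,8}. For example the [13] rule
// above encodes 13*x = x + 4*(x + 2*x) as (LEAL4 x (LEAL2 x x)), and the [27]
// rule below encodes 27*x = 3*x + 8*(3*x) as (LEAL8 (LEAL2 x x) (LEAL2 x x)).
// The later c-1/c-2/c-4/c-8 and c/3, c/5, c/9 rules generalize the same idea
// with one extra shift.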
13298 } 13299 // match: (MULLconst [27] x) 13300 // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x)) 13301 for { 13302 if auxIntToInt32(v.AuxInt) != 27 { 13303 break 13304 } 13305 x := v_0 13306 v.reset(OpAMD64LEAL8) 13307 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 13308 v0.AddArg2(x, x) 13309 v.AddArg2(v0, v0) 13310 return true 13311 } 13312 // match: (MULLconst [37] x) 13313 // result: (LEAL4 x (LEAL8 <v.Type> x x)) 13314 for { 13315 if auxIntToInt32(v.AuxInt) != 37 { 13316 break 13317 } 13318 x := v_0 13319 v.reset(OpAMD64LEAL4) 13320 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 13321 v0.AddArg2(x, x) 13322 v.AddArg2(x, v0) 13323 return true 13324 } 13325 // match: (MULLconst [41] x) 13326 // result: (LEAL8 x (LEAL4 <v.Type> x x)) 13327 for { 13328 if auxIntToInt32(v.AuxInt) != 41 { 13329 break 13330 } 13331 x := v_0 13332 v.reset(OpAMD64LEAL8) 13333 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 13334 v0.AddArg2(x, x) 13335 v.AddArg2(x, v0) 13336 return true 13337 } 13338 // match: (MULLconst [45] x) 13339 // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x)) 13340 for { 13341 if auxIntToInt32(v.AuxInt) != 45 { 13342 break 13343 } 13344 x := v_0 13345 v.reset(OpAMD64LEAL8) 13346 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 13347 v0.AddArg2(x, x) 13348 v.AddArg2(v0, v0) 13349 return true 13350 } 13351 // match: (MULLconst [73] x) 13352 // result: (LEAL8 x (LEAL8 <v.Type> x x)) 13353 for { 13354 if auxIntToInt32(v.AuxInt) != 73 { 13355 break 13356 } 13357 x := v_0 13358 v.reset(OpAMD64LEAL8) 13359 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 13360 v0.AddArg2(x, x) 13361 v.AddArg2(x, v0) 13362 return true 13363 } 13364 // match: (MULLconst [81] x) 13365 // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x)) 13366 for { 13367 if auxIntToInt32(v.AuxInt) != 81 { 13368 break 13369 } 13370 x := v_0 13371 v.reset(OpAMD64LEAL8) 13372 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 13373 v0.AddArg2(x, x) 13374 v.AddArg2(v0, v0) 13375 return true 13376 } 13377 // match: (MULLconst [c] x) 13378 // cond: isPowerOfTwo64(int64(c)+1) && c >= 15 13379 // result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x) 13380 for { 13381 c := auxIntToInt32(v.AuxInt) 13382 x := v_0 13383 if !(isPowerOfTwo64(int64(c)+1) && c >= 15) { 13384 break 13385 } 13386 v.reset(OpAMD64SUBL) 13387 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 13388 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1))) 13389 v0.AddArg(x) 13390 v.AddArg2(v0, x) 13391 return true 13392 } 13393 // match: (MULLconst [c] x) 13394 // cond: isPowerOfTwo32(c-1) && c >= 17 13395 // result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x) 13396 for { 13397 c := auxIntToInt32(v.AuxInt) 13398 x := v_0 13399 if !(isPowerOfTwo32(c-1) && c >= 17) { 13400 break 13401 } 13402 v.reset(OpAMD64LEAL1) 13403 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 13404 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1))) 13405 v0.AddArg(x) 13406 v.AddArg2(v0, x) 13407 return true 13408 } 13409 // match: (MULLconst [c] x) 13410 // cond: isPowerOfTwo32(c-2) && c >= 34 13411 // result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x) 13412 for { 13413 c := auxIntToInt32(v.AuxInt) 13414 x := v_0 13415 if !(isPowerOfTwo32(c-2) && c >= 34) { 13416 break 13417 } 13418 v.reset(OpAMD64LEAL2) 13419 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 13420 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2))) 13421 v0.AddArg(x) 13422 v.AddArg2(v0, x) 13423 return true 13424 } 13425 // match: (MULLconst [c] x) 13426 // cond: isPowerOfTwo32(c-4) && c >= 
68 13427 // result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x) 13428 for { 13429 c := auxIntToInt32(v.AuxInt) 13430 x := v_0 13431 if !(isPowerOfTwo32(c-4) && c >= 68) { 13432 break 13433 } 13434 v.reset(OpAMD64LEAL4) 13435 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 13436 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4))) 13437 v0.AddArg(x) 13438 v.AddArg2(v0, x) 13439 return true 13440 } 13441 // match: (MULLconst [c] x) 13442 // cond: isPowerOfTwo32(c-8) && c >= 136 13443 // result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x) 13444 for { 13445 c := auxIntToInt32(v.AuxInt) 13446 x := v_0 13447 if !(isPowerOfTwo32(c-8) && c >= 136) { 13448 break 13449 } 13450 v.reset(OpAMD64LEAL8) 13451 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 13452 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8))) 13453 v0.AddArg(x) 13454 v.AddArg2(v0, x) 13455 return true 13456 } 13457 // match: (MULLconst [c] x) 13458 // cond: c%3 == 0 && isPowerOfTwo32(c/3) 13459 // result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x)) 13460 for { 13461 c := auxIntToInt32(v.AuxInt) 13462 x := v_0 13463 if !(c%3 == 0 && isPowerOfTwo32(c/3)) { 13464 break 13465 } 13466 v.reset(OpAMD64SHLLconst) 13467 v.AuxInt = int8ToAuxInt(int8(log32(c / 3))) 13468 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) 13469 v0.AddArg2(x, x) 13470 v.AddArg(v0) 13471 return true 13472 } 13473 // match: (MULLconst [c] x) 13474 // cond: c%5 == 0 && isPowerOfTwo32(c/5) 13475 // result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x)) 13476 for { 13477 c := auxIntToInt32(v.AuxInt) 13478 x := v_0 13479 if !(c%5 == 0 && isPowerOfTwo32(c/5)) { 13480 break 13481 } 13482 v.reset(OpAMD64SHLLconst) 13483 v.AuxInt = int8ToAuxInt(int8(log32(c / 5))) 13484 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) 13485 v0.AddArg2(x, x) 13486 v.AddArg(v0) 13487 return true 13488 } 13489 // match: (MULLconst [c] x) 13490 // cond: c%9 == 0 && isPowerOfTwo32(c/9) 13491 // result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x)) 13492 for { 13493 c := auxIntToInt32(v.AuxInt) 13494 x := v_0 13495 if !(c%9 == 0 && isPowerOfTwo32(c/9)) { 13496 break 13497 } 13498 v.reset(OpAMD64SHLLconst) 13499 v.AuxInt = int8ToAuxInt(int8(log32(c / 9))) 13500 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) 13501 v0.AddArg2(x, x) 13502 v.AddArg(v0) 13503 return true 13504 } 13505 // match: (MULLconst [c] (MOVLconst [d])) 13506 // result: (MOVLconst [c*d]) 13507 for { 13508 c := auxIntToInt32(v.AuxInt) 13509 if v_0.Op != OpAMD64MOVLconst { 13510 break 13511 } 13512 d := auxIntToInt32(v_0.AuxInt) 13513 v.reset(OpAMD64MOVLconst) 13514 v.AuxInt = int32ToAuxInt(c * d) 13515 return true 13516 } 13517 return false 13518 } 13519 func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool { 13520 v_1 := v.Args[1] 13521 v_0 := v.Args[0] 13522 // match: (MULQ x (MOVQconst [c])) 13523 // cond: is32Bit(c) 13524 // result: (MULQconst [int32(c)] x) 13525 for { 13526 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { 13527 x := v_0 13528 if v_1.Op != OpAMD64MOVQconst { 13529 continue 13530 } 13531 c := auxIntToInt64(v_1.AuxInt) 13532 if !(is32Bit(c)) { 13533 continue 13534 } 13535 v.reset(OpAMD64MULQconst) 13536 v.AuxInt = int32ToAuxInt(int32(c)) 13537 v.AddArg(x) 13538 return true 13539 } 13540 break 13541 } 13542 return false 13543 } 13544 func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { 13545 v_0 := v.Args[0] 13546 b := v.Block 13547 // match: (MULQconst [c] (MULQconst [d] x)) 13548 // cond: is32Bit(int64(c)*int64(d)) 13549 // result: (MULQconst [c * d] x) 13550 for { 13551 c := 
auxIntToInt32(v.AuxInt) 13552 if v_0.Op != OpAMD64MULQconst { 13553 break 13554 } 13555 d := auxIntToInt32(v_0.AuxInt) 13556 x := v_0.Args[0] 13557 if !(is32Bit(int64(c) * int64(d))) { 13558 break 13559 } 13560 v.reset(OpAMD64MULQconst) 13561 v.AuxInt = int32ToAuxInt(c * d) 13562 v.AddArg(x) 13563 return true 13564 } 13565 // match: (MULQconst [-9] x) 13566 // result: (NEGQ (LEAQ8 <v.Type> x x)) 13567 for { 13568 if auxIntToInt32(v.AuxInt) != -9 { 13569 break 13570 } 13571 x := v_0 13572 v.reset(OpAMD64NEGQ) 13573 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 13574 v0.AddArg2(x, x) 13575 v.AddArg(v0) 13576 return true 13577 } 13578 // match: (MULQconst [-5] x) 13579 // result: (NEGQ (LEAQ4 <v.Type> x x)) 13580 for { 13581 if auxIntToInt32(v.AuxInt) != -5 { 13582 break 13583 } 13584 x := v_0 13585 v.reset(OpAMD64NEGQ) 13586 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 13587 v0.AddArg2(x, x) 13588 v.AddArg(v0) 13589 return true 13590 } 13591 // match: (MULQconst [-3] x) 13592 // result: (NEGQ (LEAQ2 <v.Type> x x)) 13593 for { 13594 if auxIntToInt32(v.AuxInt) != -3 { 13595 break 13596 } 13597 x := v_0 13598 v.reset(OpAMD64NEGQ) 13599 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 13600 v0.AddArg2(x, x) 13601 v.AddArg(v0) 13602 return true 13603 } 13604 // match: (MULQconst [-1] x) 13605 // result: (NEGQ x) 13606 for { 13607 if auxIntToInt32(v.AuxInt) != -1 { 13608 break 13609 } 13610 x := v_0 13611 v.reset(OpAMD64NEGQ) 13612 v.AddArg(x) 13613 return true 13614 } 13615 // match: (MULQconst [ 0] _) 13616 // result: (MOVQconst [0]) 13617 for { 13618 if auxIntToInt32(v.AuxInt) != 0 { 13619 break 13620 } 13621 v.reset(OpAMD64MOVQconst) 13622 v.AuxInt = int64ToAuxInt(0) 13623 return true 13624 } 13625 // match: (MULQconst [ 1] x) 13626 // result: x 13627 for { 13628 if auxIntToInt32(v.AuxInt) != 1 { 13629 break 13630 } 13631 x := v_0 13632 v.copyOf(x) 13633 return true 13634 } 13635 // match: (MULQconst [ 3] x) 13636 // result: (LEAQ2 x x) 13637 for { 13638 if auxIntToInt32(v.AuxInt) != 3 { 13639 break 13640 } 13641 x := v_0 13642 v.reset(OpAMD64LEAQ2) 13643 v.AddArg2(x, x) 13644 return true 13645 } 13646 // match: (MULQconst [ 5] x) 13647 // result: (LEAQ4 x x) 13648 for { 13649 if auxIntToInt32(v.AuxInt) != 5 { 13650 break 13651 } 13652 x := v_0 13653 v.reset(OpAMD64LEAQ4) 13654 v.AddArg2(x, x) 13655 return true 13656 } 13657 // match: (MULQconst [ 7] x) 13658 // result: (LEAQ2 x (LEAQ2 <v.Type> x x)) 13659 for { 13660 if auxIntToInt32(v.AuxInt) != 7 { 13661 break 13662 } 13663 x := v_0 13664 v.reset(OpAMD64LEAQ2) 13665 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 13666 v0.AddArg2(x, x) 13667 v.AddArg2(x, v0) 13668 return true 13669 } 13670 // match: (MULQconst [ 9] x) 13671 // result: (LEAQ8 x x) 13672 for { 13673 if auxIntToInt32(v.AuxInt) != 9 { 13674 break 13675 } 13676 x := v_0 13677 v.reset(OpAMD64LEAQ8) 13678 v.AddArg2(x, x) 13679 return true 13680 } 13681 // match: (MULQconst [11] x) 13682 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 13683 for { 13684 if auxIntToInt32(v.AuxInt) != 11 { 13685 break 13686 } 13687 x := v_0 13688 v.reset(OpAMD64LEAQ2) 13689 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 13690 v0.AddArg2(x, x) 13691 v.AddArg2(x, v0) 13692 return true 13693 } 13694 // match: (MULQconst [13] x) 13695 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 13696 for { 13697 if auxIntToInt32(v.AuxInt) != 13 { 13698 break 13699 } 13700 x := v_0 13701 v.reset(OpAMD64LEAQ4) 13702 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 13703 v0.AddArg2(x, x) 13704 v.AddArg2(x, v0) 13705 return true 13706 
	// match: (MULQconst [19] x)
	// result: (LEAQ2 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 19 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [21] x)
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 21 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [25] x)
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 25 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [27] x)
	// result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 27 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [37] x)
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 37 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [41] x)
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 41 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [45] x)
	// result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 45 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [73] x)
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 73 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [81] x)
	// result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
	for {
		if auxIntToInt32(v.AuxInt) != 81 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo64(int64(c)+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo32(c/3)
	// result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo32(c/5)
	// result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo32(c/9)
	// result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) * d)
		return true
	}
	// match: (MULQconst [c] (NEGQ x))
	// cond: c != -(1<<31)
	// result: (MULQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
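// Editorial note (illustrative, not generator output): the MULQconst rules
// above strength-reduce multiplication by a constant. LEAQ1/2/4/8 a b computes
// a+1*b, a+2*b, a+4*b, a+8*b in one instruction, so small odd multipliers
// become short LEA chains, constants adjacent to a power of two become a shift
// plus one fix-up, and multiples of 3, 5, or 9 become an LEA followed by a
// shift. A sketch of the identities, with hypothetical helper names:
//
//	func mulBy27(x int64) int64 {
//		t := x + 2*x   // (LEAQ2 x x) = 3*x
//		return t + 8*t // (LEAQ8 t t) = 9*t = 27*x
//	}
//
//	func mulBy15(x int64) int64 {
//		return x<<4 - x // c+1 = 16 is a power of two: SHLQconst, then SUBQ
//	}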
func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (MULSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
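// Editorial note (illustrative, not generator output): the MULSD/MULSDload
// rules above show two patterns that recur throughout this file. A multiply
// whose operand is a single-use MOVSDload folds into the memory form MULSDload
// (guarded by canMergeLoadClobber and clobber). Conversely, when the loaded
// slot is exactly the target of the pending MOVQstore in the memory argument,
// the load is forwarded from the stored register through MOVQi2f, a GPR->XMM
// move, instead of a round trip through memory. In Go terms the forwarded
// result is roughly:
//
//	r := x * math.Float64frombits(y) // what (MULSD x (MOVQi2f y)) computes,
//	                                 // assuming y holds the stored bit pattern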
func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (MULSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
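// Editorial note (illustrative, not generator output): the (ADDQconst [off2]
// base) and (LEAQ [off2] {sym2} base) rules seen in MULSSload above, and
// repeated for every *load/*modify op below, fold address arithmetic into the
// instruction's addressing mode. The rewrite is just reassociation of the
// effective address, legal while the combined displacement fits in a signed
// 32 bits (is32Bit) and at most one symbol remains (canMergeSym):
//
//	// mem[(base+off2) + off1]  ==  mem[base + (off1+off2)]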
func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGL (NEGL x))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGL {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEGL s:(SUBL x y))
	// cond: s.Uses == 1
	// result: (SUBL y x)
	for {
		s := v_0
		if s.Op != OpAMD64SUBL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg2(y, x)
		return true
	}
	// match: (NEGL (MOVLconst [c]))
	// result: (MOVLconst [-c])
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGQ (NEGQ x))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEGQ s:(SUBQ x y))
	// cond: s.Uses == 1
	// result: (SUBQ y x)
	for {
		s := v_0
		if s.Op != OpAMD64SUBQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (NEGQ (MOVQconst [c]))
	// result: (MOVQconst [-c])
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-c)
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NOTL (MOVLconst [c]))
	// result: (MOVLconst [^c])
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(^c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NOTQ (MOVQconst [c]))
	// result: (MOVQconst [^c])
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(^c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTSL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTSL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORL x (MOVLconst [c]))
	// result: (ORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
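// Editorial note (illustrative, not generator output): (ORL (SHLL (MOVLconst
// [1]) y) x) is the "set bit y" idiom, which the first ORL rule above turns
// into a single BTSL instruction. The identity, as a sketch with a
// hypothetical helper name (the hardware takes the count mod 32):
//
//	func setBit32(x uint32, y uint) uint32 {
//		return x | 1<<(y&31) // what (BTSL x y) computes
//	}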
func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORLconst [c] (ORLconst [d] x))
	// result: (ORLconst [c | d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: c==-1
	// result: (MOVLconst [-1])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c|d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c | d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
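// Editorial note (illustrative, not generator output): the ORLconst rules
// above are plain bit algebra on the 32-bit immediate:
//
//	// x|0 == x, x|-1 == -1, (x|d)|c == x|(c|d)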
func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: ( ORL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTSQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTSQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
	// result: (BTSQconst [int8(log64(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTSQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ORQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ x (MOVLconst [c]))
	// result: (ORQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ORQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
	// result: (SHRDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHLQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHRDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
	// result: (SHLDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHLDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits)))
	// result: (SHRDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRXQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHLXQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHRDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits)))
	// result: (SHLDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLXQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHRXQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHLDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
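	// Editorial note (illustrative, not generator output): the four rules
	// above recognize the two-word funnel-shift idiom. Because x86 shifts mask
	// their count to 6 bits, the front end expresses "hi shifted by 64-n" as a
	// shift by (NEGQ bits), and the combined OR is exactly what SHRDQ/SHLDQ
	// compute in one instruction. Sketch of the right-shift form (valid for
	// n&63 != 0):
	//
	//	func shrd(lo, hi uint64, n uint) uint64 {
	//		return lo>>(n&63) | hi<<((64-n)&63) // (SHRDQ lo hi bits)
	//	}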
	// match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
	// result: (MOVQconst [c|d])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			d := auxIntToInt64(v_1.AuxInt)
			v.reset(OpAMD64MOVQconst)
			v.AuxInt = int64ToAuxInt(c | d)
			return true
		}
		break
	}
	// match: (ORQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ORQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORQconst [c] (ORQconst [d] x))
	// result: (ORQconst [c | d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// result: (MOVQconst [-1])
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)|d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) | d)
		return true
	}
	return false
}
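// Editorial note (illustrative, not generator output): in the last ORQconst
// rule the auxint c is a 32-bit immediate and int64(c) sign-extends it,
// matching ORQ's imm32 operand semantics: for example int64(int32(-8)) | d
// sets the high 32 bits as well, unlike a zero-extended 0xFFFFFFF8 | d.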
func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: ( ORQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLB x (NEGQ y))
	// result: (RORB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// result: (RORB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// result: (ROLBconst [int8(c&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 7))
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// result: (ROLBconst [int8(c&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 7))
		v.AddArg(x)
		return true
	}
	return false
}
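// Editorial note (illustrative, not generator output): the ROLB rules above,
// and the ROLL/ROLQ/ROLW and RORx families that follow, rely on two rotate
// facts: rotating by -y equals rotating the opposite direction by y, and a
// rotate only depends on its count modulo the width (hence the &7/&15/&31/&63
// masks). The same identities via math/bits:
//
//	bits.RotateLeft8(x, -int(y))  // == rotate right by y
//	bits.RotateLeft8(x, int(c)&7) // == bits.RotateLeft8(x, int(c))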
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLL x (NEGQ y))
	// result: (RORL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// result: (RORL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// result: (ROLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// result: (ROLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLQ x (NEGQ y))
	// result: (RORQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// result: (RORQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// result: (ROLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// result: (ROLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLW x (NEGQ y))
	// result: (RORW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// result: (RORW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// result: (ROLWconst [int8(c&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 15))
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// result: (ROLWconst [int8(c&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 15))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORB x (NEGQ y))
	// result: (ROLB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORB x (NEGL y))
	// result: (ROLB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// result: (ROLBconst [int8((-c)&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// result: (ROLBconst [int8((-c)&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORL x (NEGQ y))
	// result: (ROLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORL x (NEGL y))
	// result: (ROLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// result: (ROLLconst [int8((-c)&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// result: (ROLLconst [int8((-c)&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORQ x (NEGQ y))
	// result: (ROLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// result: (ROLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// result: (ROLQconst [int8((-c)&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// result: (ROLQconst [int8((-c)&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORW x (NEGQ y))
	// result: (ROLW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORW x (NEGL y))
	// result: (ROLW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// result: (ROLWconst [int8((-c)&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// result: (ROLWconst [int8((-c)&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SARB x (MOVQconst [c]))
	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARBconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARBconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
		return true
	}
	return false
}
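// Editorial note (illustrative, not generator output): SARB (and SARW below)
// clamps the count with min(c&31, 7) rather than masking by the value width.
// The hardware masks the count of an 8/16/32-bit shift to 5 bits, and an
// arithmetic shift saturates: once every bit is a copy of the sign bit, larger
// counts change nothing, so clamping to width-1 preserves the semantics while
// keeping the immediate in range. E.g. for any k >= 7:
//
//	// int8(x) >> k == int8(x) >> 7 (all sign bits)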
func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SARL x (MOVQconst [c]))
	// result: (SARLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// result: (SARLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SARXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SARXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
		return true
	}
	return false
}
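// Editorial note (illustrative, not generator output): the SARL rules above
// (and the SARQ rules below) delete shift-count arithmetic that cannot affect
// the masked count: adding a multiple of the width (ADDQconst/ADDLconst with
// c&31 == 0) or ANDing with a mask that keeps all five count bits
// (ANDQconst/ANDLconst with c&31 == 31) is a no-op on the effective count,
// and the same holds under NEG. Sketch:
//
//	// x >> ((y + 32) & 31) == x >> (y & 31)
//	// x >> ((y & 63) & 31) == x >> (y & 31)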
func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SARQ x (MOVQconst [c]))
	// result: (SARQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// result: (SARQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SARXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SARXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
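// Editorial note (illustrative, not generator output): the final SARL/SARQ
// rule fires only for buildcfg.GOAMD64 >= 3, where BMI2 is guaranteed: SARX
// takes its count in any register and has a memory form, so a single-use load
// feeding a variable shift merges into one SARXLload/SARXQload instruction
// (canMergeLoad and clobber retire the original load). The SARXQload rules
// below undo this when the count later proves constant, returning to the
// plain SARQconst form.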
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SARXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SARXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d >> uint64(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SARW x (MOVQconst [c]))
	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARWconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARWconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
		return true
	}
	return false
}
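// Note: unlike the 32- and 64-bit cases, the SARW rules above clamp the
// constant count with min(c&31, 15) rather than just masking it. A 16-bit
// arithmetic shift by 15 already smears the sign bit across the whole
// value, so any larger count behaves identically and clamping keeps the
// encoded constant in range. Illustrative sketch:
//
//	var x int16 = -8
//	_ = x >> 15 // -1; a shift by 31 would also yield -1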
func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SBBLcarrymask (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// result: (MOVLconst [-1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// result: (MOVLconst [-1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
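// Note: SBBLcarrymask is roughly "subtract-with-borrow of a value from
// itself", which materializes -1 when the carry flag is set and 0 when it
// is clear. The rules above constant-fold it once the flag state is known:
// the *_ULT states (carry set) give -1, all others give 0. A rough Go-level
// equivalent of the mask being built, with carry as a hypothetical bool:
//
//	mask := int32(0)
//	if carry {
//		mask = -1 // all ones, usable as a select mask
//	}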
func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SBBQ x (MOVQconst [c]) borrow)
	// cond: is32Bit(c)
	// result: (SBBQconst x [int32(c)] borrow)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		borrow := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SBBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(x, borrow)
		return true
	}
	// match: (SBBQ x y (FlagEQ))
	// result: (SUBQborrow x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQborrow)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SBBQcarrymask (FlagEQ))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// result: (MOVQconst [-1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// result: (MOVQconst [-1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SBBQconst x [c] (FlagEQ))
	// result: (SUBQconstborrow x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETA (InvertFlags x))
	// result: (SETB x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
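	// Note: InvertFlags marks a comparison whose operands were swapped, so
	// the rule at the top of this function maps SETA to its dual SETB, not
	// to its negation: a > b and b < a test the same fact. The same duality
	// shows up throughout the SETcc and SET*store rules below.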
	// match: (SETA (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETAE (TESTQ x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTL x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTW x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTB x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (InvertFlags x))
	// result: (SETBE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
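// Note: TEST of a value against itself always clears the carry flag, so an
// unsigned >= (SETAE) of that comparison is vacuously true; the ConstBool
// rules above fold it away entirely, and the matching SETB rules further
// down fold the same shape to false. No instruction is emitted for either.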
func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
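	// Note: in the flag-constant rules here, the comparison outcome is
	// already known, so the SET*store collapses into a plain one-byte store
	// of 0 or 1 (a MOVLconst materialized as <typ.UInt8>); no flags are
	// read at run time. The ADDQconst/LEAQ rules above instead fold address
	// arithmetic into the store's offset, guarded by is32Bit so the final
	// displacement still fits in the instruction encoding.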
	// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETB (TESTQ x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTL x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTW x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTB x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (BTLconst [0] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (BTQconst [0] x))
	// result: (ANDQconst [1] x)
	for {
		if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (InvertFlags x))
	// result: (SETA x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETBE (InvertFlags x))
	// result: (SETAE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
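// Note: a bit-test instruction copies the selected bit into the carry
// flag, so SETB over (BTLconst [0] x) is exactly the low bit of x. The
// SETB rules above therefore reduce that shape to ANDLconst/ANDQconst [1],
// skipping the flags round-trip entirely. A hedged source-level sketch:
//
//	func bit0(x uint32) uint32 {
//		return x & 1 // what SETB (BTLconst [0] x) reduces to
//	}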
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETAE (BTL x y))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
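	// Note: the rule above and the TESTQ variant below recognize
	// "y & (1 << x)" style tests and fuse the SHL+TEST pair into a single
	// bit-test (BT) instruction; equality with zero then becomes SETAE,
	// i.e. carry clear. A hedged sketch of a Go shape that can lower this
	// way on amd64:
	//
	//	func bitClear(y uint32, x uint32) bool {
	//		return y&(1<<x) == 0 // BT selects bit x; SETAE reads !CF
	//	}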
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETAE (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAE (BTLconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAE (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETNE (CMPLconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETNE (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (InvertFlags x))
	// result: (SETEQ x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
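	// Note: BLSRQ/BLSRL compute x & (x-1) (clear lowest set bit) and set ZF
	// themselves, so when the TEST above compares that result against
	// itself, the rules drop the TEST and read BLSR's own flags via Select1.
	// A hedged sketch of a Go idiom that can produce this shape when BMI1
	// instructions are available (e.g. GOAMD64 >= v3):
	//
	//	func isPowerOfTwo(x uint64) bool {
	//		return x != 0 && x&(x-1) == 0 // the x&(x-1)==0 test can reuse BLSR's ZF
	//	}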
	// match: (SETEQ (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
= int32ToAuxInt(off) 18259 v.Aux = symToAux(sym) 18260 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 18261 v0.AuxInt = int8ToAuxInt(0) 18262 v0.AddArg(x) 18263 v.AddArg3(ptr, v0, mem) 18264 return true 18265 } 18266 break 18267 } 18268 // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) 18269 // cond: z1==z2 18270 // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) 18271 for { 18272 off := auxIntToInt32(v.AuxInt) 18273 sym := auxToSym(v.Aux) 18274 ptr := v_0 18275 if v_1.Op != OpAMD64TESTQ { 18276 break 18277 } 18278 _ = v_1.Args[1] 18279 v_1_0 := v_1.Args[0] 18280 v_1_1 := v_1.Args[1] 18281 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 18282 z1 := v_1_0 18283 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 18284 continue 18285 } 18286 x := z1.Args[0] 18287 z2 := v_1_1 18288 mem := v_2 18289 if !(z1 == z2) { 18290 continue 18291 } 18292 v.reset(OpAMD64SETAEstore) 18293 v.AuxInt = int32ToAuxInt(off) 18294 v.Aux = symToAux(sym) 18295 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 18296 v0.AuxInt = int8ToAuxInt(63) 18297 v0.AddArg(x) 18298 v.AddArg3(ptr, v0, mem) 18299 return true 18300 } 18301 break 18302 } 18303 // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) 18304 // cond: z1==z2 18305 // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) 18306 for { 18307 off := auxIntToInt32(v.AuxInt) 18308 sym := auxToSym(v.Aux) 18309 ptr := v_0 18310 if v_1.Op != OpAMD64TESTL { 18311 break 18312 } 18313 _ = v_1.Args[1] 18314 v_1_0 := v_1.Args[0] 18315 v_1_1 := v_1.Args[1] 18316 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 18317 z1 := v_1_0 18318 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 18319 continue 18320 } 18321 x := z1.Args[0] 18322 z2 := v_1_1 18323 mem := v_2 18324 if !(z1 == z2) { 18325 continue 18326 } 18327 v.reset(OpAMD64SETAEstore) 18328 v.AuxInt = int32ToAuxInt(off) 18329 v.Aux = symToAux(sym) 18330 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 18331 v0.AuxInt = int8ToAuxInt(31) 18332 v0.AddArg(x) 18333 v.AddArg3(ptr, v0, mem) 18334 return true 18335 } 18336 break 18337 } 18338 // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem) 18339 // result: (SETEQstore [off] {sym} ptr x mem) 18340 for { 18341 off := auxIntToInt32(v.AuxInt) 18342 sym := auxToSym(v.Aux) 18343 ptr := v_0 18344 if v_1.Op != OpAMD64InvertFlags { 18345 break 18346 } 18347 x := v_1.Args[0] 18348 mem := v_2 18349 v.reset(OpAMD64SETEQstore) 18350 v.AuxInt = int32ToAuxInt(off) 18351 v.Aux = symToAux(sym) 18352 v.AddArg3(ptr, x, mem) 18353 return true 18354 } 18355 // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem) 18356 // cond: is32Bit(int64(off1)+int64(off2)) 18357 // result: (SETEQstore [off1+off2] {sym} base val mem) 18358 for { 18359 off1 := auxIntToInt32(v.AuxInt) 18360 sym := auxToSym(v.Aux) 18361 if v_0.Op != OpAMD64ADDQconst { 18362 break 18363 } 18364 off2 := auxIntToInt32(v_0.AuxInt) 18365 base := v_0.Args[0] 18366 val := v_1 18367 mem := v_2 18368 if !(is32Bit(int64(off1) + int64(off2))) { 18369 break 18370 } 18371 v.reset(OpAMD64SETEQstore) 18372 v.AuxInt = int32ToAuxInt(off1 + off2) 18373 v.Aux = symToAux(sym) 18374 v.AddArg3(base, val, mem) 18375 return true 18376 } 18377 // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 18378 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 18379 // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 18380 for { 
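// This rule and the ADDQconst rule above fold addressing arithmetic into
// the store itself: a constant added to the pointer is absorbed into the
// instruction's offset (guarded by is32Bit so the combined displacement
// still fits a signed 32-bit field), and a LEAQ base is absorbed when
// canMergeSym shows at most one symbolic base survives the merge.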
18381 off1 := auxIntToInt32(v.AuxInt) 18382 sym1 := auxToSym(v.Aux) 18383 if v_0.Op != OpAMD64LEAQ { 18384 break 18385 } 18386 off2 := auxIntToInt32(v_0.AuxInt) 18387 sym2 := auxToSym(v_0.Aux) 18388 base := v_0.Args[0] 18389 val := v_1 18390 mem := v_2 18391 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 18392 break 18393 } 18394 v.reset(OpAMD64SETEQstore) 18395 v.AuxInt = int32ToAuxInt(off1 + off2) 18396 v.Aux = symToAux(mergeSym(sym1, sym2)) 18397 v.AddArg3(base, val, mem) 18398 return true 18399 } 18400 // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem) 18401 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 18402 for { 18403 off := auxIntToInt32(v.AuxInt) 18404 sym := auxToSym(v.Aux) 18405 ptr := v_0 18406 if v_1.Op != OpAMD64FlagEQ { 18407 break 18408 } 18409 mem := v_2 18410 v.reset(OpAMD64MOVBstore) 18411 v.AuxInt = int32ToAuxInt(off) 18412 v.Aux = symToAux(sym) 18413 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18414 v0.AuxInt = int32ToAuxInt(1) 18415 v.AddArg3(ptr, v0, mem) 18416 return true 18417 } 18418 // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) 18419 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18420 for { 18421 off := auxIntToInt32(v.AuxInt) 18422 sym := auxToSym(v.Aux) 18423 ptr := v_0 18424 if v_1.Op != OpAMD64FlagLT_ULT { 18425 break 18426 } 18427 mem := v_2 18428 v.reset(OpAMD64MOVBstore) 18429 v.AuxInt = int32ToAuxInt(off) 18430 v.Aux = symToAux(sym) 18431 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18432 v0.AuxInt = int32ToAuxInt(0) 18433 v.AddArg3(ptr, v0, mem) 18434 return true 18435 } 18436 // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) 18437 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18438 for { 18439 off := auxIntToInt32(v.AuxInt) 18440 sym := auxToSym(v.Aux) 18441 ptr := v_0 18442 if v_1.Op != OpAMD64FlagLT_UGT { 18443 break 18444 } 18445 mem := v_2 18446 v.reset(OpAMD64MOVBstore) 18447 v.AuxInt = int32ToAuxInt(off) 18448 v.Aux = symToAux(sym) 18449 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18450 v0.AuxInt = int32ToAuxInt(0) 18451 v.AddArg3(ptr, v0, mem) 18452 return true 18453 } 18454 // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) 18455 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18456 for { 18457 off := auxIntToInt32(v.AuxInt) 18458 sym := auxToSym(v.Aux) 18459 ptr := v_0 18460 if v_1.Op != OpAMD64FlagGT_ULT { 18461 break 18462 } 18463 mem := v_2 18464 v.reset(OpAMD64MOVBstore) 18465 v.AuxInt = int32ToAuxInt(off) 18466 v.Aux = symToAux(sym) 18467 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18468 v0.AuxInt = int32ToAuxInt(0) 18469 v.AddArg3(ptr, v0, mem) 18470 return true 18471 } 18472 // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) 18473 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18474 for { 18475 off := auxIntToInt32(v.AuxInt) 18476 sym := auxToSym(v.Aux) 18477 ptr := v_0 18478 if v_1.Op != OpAMD64FlagGT_UGT { 18479 break 18480 } 18481 mem := v_2 18482 v.reset(OpAMD64MOVBstore) 18483 v.AuxInt = int32ToAuxInt(off) 18484 v.Aux = symToAux(sym) 18485 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18486 v0.AuxInt = int32ToAuxInt(0) 18487 v.AddArg3(ptr, v0, mem) 18488 return true 18489 } 18490 return false 18491 } 18492 func rewriteValueAMD64_OpAMD64SETG(v *Value) bool { 18493 v_0 := v.Args[0] 18494 // match: (SETG (InvertFlags x)) 18495 // result: (SETL x) 18496 for { 18497 if v_0.Op != OpAMD64InvertFlags { 18498 break 18499 } 18500 
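// InvertFlags records that the comparison's operands were swapped, so each
// signed condition maps to its mirror: x > y exactly when y < x. SETG of
// inverted flags is therefore SETL of the original flags, with no new
// comparison needed.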
x := v_0.Args[0] 18501 v.reset(OpAMD64SETL) 18502 v.AddArg(x) 18503 return true 18504 } 18505 // match: (SETG (FlagEQ)) 18506 // result: (MOVLconst [0]) 18507 for { 18508 if v_0.Op != OpAMD64FlagEQ { 18509 break 18510 } 18511 v.reset(OpAMD64MOVLconst) 18512 v.AuxInt = int32ToAuxInt(0) 18513 return true 18514 } 18515 // match: (SETG (FlagLT_ULT)) 18516 // result: (MOVLconst [0]) 18517 for { 18518 if v_0.Op != OpAMD64FlagLT_ULT { 18519 break 18520 } 18521 v.reset(OpAMD64MOVLconst) 18522 v.AuxInt = int32ToAuxInt(0) 18523 return true 18524 } 18525 // match: (SETG (FlagLT_UGT)) 18526 // result: (MOVLconst [0]) 18527 for { 18528 if v_0.Op != OpAMD64FlagLT_UGT { 18529 break 18530 } 18531 v.reset(OpAMD64MOVLconst) 18532 v.AuxInt = int32ToAuxInt(0) 18533 return true 18534 } 18535 // match: (SETG (FlagGT_ULT)) 18536 // result: (MOVLconst [1]) 18537 for { 18538 if v_0.Op != OpAMD64FlagGT_ULT { 18539 break 18540 } 18541 v.reset(OpAMD64MOVLconst) 18542 v.AuxInt = int32ToAuxInt(1) 18543 return true 18544 } 18545 // match: (SETG (FlagGT_UGT)) 18546 // result: (MOVLconst [1]) 18547 for { 18548 if v_0.Op != OpAMD64FlagGT_UGT { 18549 break 18550 } 18551 v.reset(OpAMD64MOVLconst) 18552 v.AuxInt = int32ToAuxInt(1) 18553 return true 18554 } 18555 return false 18556 } 18557 func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool { 18558 v_0 := v.Args[0] 18559 // match: (SETGE (InvertFlags x)) 18560 // result: (SETLE x) 18561 for { 18562 if v_0.Op != OpAMD64InvertFlags { 18563 break 18564 } 18565 x := v_0.Args[0] 18566 v.reset(OpAMD64SETLE) 18567 v.AddArg(x) 18568 return true 18569 } 18570 // match: (SETGE (FlagEQ)) 18571 // result: (MOVLconst [1]) 18572 for { 18573 if v_0.Op != OpAMD64FlagEQ { 18574 break 18575 } 18576 v.reset(OpAMD64MOVLconst) 18577 v.AuxInt = int32ToAuxInt(1) 18578 return true 18579 } 18580 // match: (SETGE (FlagLT_ULT)) 18581 // result: (MOVLconst [0]) 18582 for { 18583 if v_0.Op != OpAMD64FlagLT_ULT { 18584 break 18585 } 18586 v.reset(OpAMD64MOVLconst) 18587 v.AuxInt = int32ToAuxInt(0) 18588 return true 18589 } 18590 // match: (SETGE (FlagLT_UGT)) 18591 // result: (MOVLconst [0]) 18592 for { 18593 if v_0.Op != OpAMD64FlagLT_UGT { 18594 break 18595 } 18596 v.reset(OpAMD64MOVLconst) 18597 v.AuxInt = int32ToAuxInt(0) 18598 return true 18599 } 18600 // match: (SETGE (FlagGT_ULT)) 18601 // result: (MOVLconst [1]) 18602 for { 18603 if v_0.Op != OpAMD64FlagGT_ULT { 18604 break 18605 } 18606 v.reset(OpAMD64MOVLconst) 18607 v.AuxInt = int32ToAuxInt(1) 18608 return true 18609 } 18610 // match: (SETGE (FlagGT_UGT)) 18611 // result: (MOVLconst [1]) 18612 for { 18613 if v_0.Op != OpAMD64FlagGT_UGT { 18614 break 18615 } 18616 v.reset(OpAMD64MOVLconst) 18617 v.AuxInt = int32ToAuxInt(1) 18618 return true 18619 } 18620 return false 18621 } 18622 func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { 18623 v_2 := v.Args[2] 18624 v_1 := v.Args[1] 18625 v_0 := v.Args[0] 18626 b := v.Block 18627 typ := &b.Func.Config.Types 18628 // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem) 18629 // result: (SETLEstore [off] {sym} ptr x mem) 18630 for { 18631 off := auxIntToInt32(v.AuxInt) 18632 sym := auxToSym(v.Aux) 18633 ptr := v_0 18634 if v_1.Op != OpAMD64InvertFlags { 18635 break 18636 } 18637 x := v_1.Args[0] 18638 mem := v_2 18639 v.reset(OpAMD64SETLEstore) 18640 v.AuxInt = int32ToAuxInt(off) 18641 v.Aux = symToAux(sym) 18642 v.AddArg3(ptr, x, mem) 18643 return true 18644 } 18645 // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem) 18646 // cond: is32Bit(int64(off1)+int64(off2)) 18647 // 
result: (SETGEstore [off1+off2] {sym} base val mem) 18648 for { 18649 off1 := auxIntToInt32(v.AuxInt) 18650 sym := auxToSym(v.Aux) 18651 if v_0.Op != OpAMD64ADDQconst { 18652 break 18653 } 18654 off2 := auxIntToInt32(v_0.AuxInt) 18655 base := v_0.Args[0] 18656 val := v_1 18657 mem := v_2 18658 if !(is32Bit(int64(off1) + int64(off2))) { 18659 break 18660 } 18661 v.reset(OpAMD64SETGEstore) 18662 v.AuxInt = int32ToAuxInt(off1 + off2) 18663 v.Aux = symToAux(sym) 18664 v.AddArg3(base, val, mem) 18665 return true 18666 } 18667 // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 18668 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 18669 // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 18670 for { 18671 off1 := auxIntToInt32(v.AuxInt) 18672 sym1 := auxToSym(v.Aux) 18673 if v_0.Op != OpAMD64LEAQ { 18674 break 18675 } 18676 off2 := auxIntToInt32(v_0.AuxInt) 18677 sym2 := auxToSym(v_0.Aux) 18678 base := v_0.Args[0] 18679 val := v_1 18680 mem := v_2 18681 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 18682 break 18683 } 18684 v.reset(OpAMD64SETGEstore) 18685 v.AuxInt = int32ToAuxInt(off1 + off2) 18686 v.Aux = symToAux(mergeSym(sym1, sym2)) 18687 v.AddArg3(base, val, mem) 18688 return true 18689 } 18690 // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem) 18691 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 18692 for { 18693 off := auxIntToInt32(v.AuxInt) 18694 sym := auxToSym(v.Aux) 18695 ptr := v_0 18696 if v_1.Op != OpAMD64FlagEQ { 18697 break 18698 } 18699 mem := v_2 18700 v.reset(OpAMD64MOVBstore) 18701 v.AuxInt = int32ToAuxInt(off) 18702 v.Aux = symToAux(sym) 18703 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18704 v0.AuxInt = int32ToAuxInt(1) 18705 v.AddArg3(ptr, v0, mem) 18706 return true 18707 } 18708 // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) 18709 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18710 for { 18711 off := auxIntToInt32(v.AuxInt) 18712 sym := auxToSym(v.Aux) 18713 ptr := v_0 18714 if v_1.Op != OpAMD64FlagLT_ULT { 18715 break 18716 } 18717 mem := v_2 18718 v.reset(OpAMD64MOVBstore) 18719 v.AuxInt = int32ToAuxInt(off) 18720 v.Aux = symToAux(sym) 18721 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18722 v0.AuxInt = int32ToAuxInt(0) 18723 v.AddArg3(ptr, v0, mem) 18724 return true 18725 } 18726 // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) 18727 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18728 for { 18729 off := auxIntToInt32(v.AuxInt) 18730 sym := auxToSym(v.Aux) 18731 ptr := v_0 18732 if v_1.Op != OpAMD64FlagLT_UGT { 18733 break 18734 } 18735 mem := v_2 18736 v.reset(OpAMD64MOVBstore) 18737 v.AuxInt = int32ToAuxInt(off) 18738 v.Aux = symToAux(sym) 18739 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18740 v0.AuxInt = int32ToAuxInt(0) 18741 v.AddArg3(ptr, v0, mem) 18742 return true 18743 } 18744 // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) 18745 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 18746 for { 18747 off := auxIntToInt32(v.AuxInt) 18748 sym := auxToSym(v.Aux) 18749 ptr := v_0 18750 if v_1.Op != OpAMD64FlagGT_ULT { 18751 break 18752 } 18753 mem := v_2 18754 v.reset(OpAMD64MOVBstore) 18755 v.AuxInt = int32ToAuxInt(off) 18756 v.Aux = symToAux(sym) 18757 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18758 v0.AuxInt = int32ToAuxInt(1) 18759 v.AddArg3(ptr, v0, mem) 18760 return true 18761 } 18762 // match: (SETGEstore [off] {sym} ptr 
(FlagGT_UGT) mem) 18763 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 18764 for { 18765 off := auxIntToInt32(v.AuxInt) 18766 sym := auxToSym(v.Aux) 18767 ptr := v_0 18768 if v_1.Op != OpAMD64FlagGT_UGT { 18769 break 18770 } 18771 mem := v_2 18772 v.reset(OpAMD64MOVBstore) 18773 v.AuxInt = int32ToAuxInt(off) 18774 v.Aux = symToAux(sym) 18775 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18776 v0.AuxInt = int32ToAuxInt(1) 18777 v.AddArg3(ptr, v0, mem) 18778 return true 18779 } 18780 return false 18781 } 18782 func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { 18783 v_2 := v.Args[2] 18784 v_1 := v.Args[1] 18785 v_0 := v.Args[0] 18786 b := v.Block 18787 typ := &b.Func.Config.Types 18788 // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem) 18789 // result: (SETLstore [off] {sym} ptr x mem) 18790 for { 18791 off := auxIntToInt32(v.AuxInt) 18792 sym := auxToSym(v.Aux) 18793 ptr := v_0 18794 if v_1.Op != OpAMD64InvertFlags { 18795 break 18796 } 18797 x := v_1.Args[0] 18798 mem := v_2 18799 v.reset(OpAMD64SETLstore) 18800 v.AuxInt = int32ToAuxInt(off) 18801 v.Aux = symToAux(sym) 18802 v.AddArg3(ptr, x, mem) 18803 return true 18804 } 18805 // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem) 18806 // cond: is32Bit(int64(off1)+int64(off2)) 18807 // result: (SETGstore [off1+off2] {sym} base val mem) 18808 for { 18809 off1 := auxIntToInt32(v.AuxInt) 18810 sym := auxToSym(v.Aux) 18811 if v_0.Op != OpAMD64ADDQconst { 18812 break 18813 } 18814 off2 := auxIntToInt32(v_0.AuxInt) 18815 base := v_0.Args[0] 18816 val := v_1 18817 mem := v_2 18818 if !(is32Bit(int64(off1) + int64(off2))) { 18819 break 18820 } 18821 v.reset(OpAMD64SETGstore) 18822 v.AuxInt = int32ToAuxInt(off1 + off2) 18823 v.Aux = symToAux(sym) 18824 v.AddArg3(base, val, mem) 18825 return true 18826 } 18827 // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 18828 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 18829 // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 18830 for { 18831 off1 := auxIntToInt32(v.AuxInt) 18832 sym1 := auxToSym(v.Aux) 18833 if v_0.Op != OpAMD64LEAQ { 18834 break 18835 } 18836 off2 := auxIntToInt32(v_0.AuxInt) 18837 sym2 := auxToSym(v_0.Aux) 18838 base := v_0.Args[0] 18839 val := v_1 18840 mem := v_2 18841 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 18842 break 18843 } 18844 v.reset(OpAMD64SETGstore) 18845 v.AuxInt = int32ToAuxInt(off1 + off2) 18846 v.Aux = symToAux(mergeSym(sym1, sym2)) 18847 v.AddArg3(base, val, mem) 18848 return true 18849 } 18850 // match: (SETGstore [off] {sym} ptr (FlagEQ) mem) 18851 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18852 for { 18853 off := auxIntToInt32(v.AuxInt) 18854 sym := auxToSym(v.Aux) 18855 ptr := v_0 18856 if v_1.Op != OpAMD64FlagEQ { 18857 break 18858 } 18859 mem := v_2 18860 v.reset(OpAMD64MOVBstore) 18861 v.AuxInt = int32ToAuxInt(off) 18862 v.Aux = symToAux(sym) 18863 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18864 v0.AuxInt = int32ToAuxInt(0) 18865 v.AddArg3(ptr, v0, mem) 18866 return true 18867 } 18868 // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem) 18869 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18870 for { 18871 off := auxIntToInt32(v.AuxInt) 18872 sym := auxToSym(v.Aux) 18873 ptr := v_0 18874 if v_1.Op != OpAMD64FlagLT_ULT { 18875 break 18876 } 18877 mem := v_2 18878 v.reset(OpAMD64MOVBstore) 18879 v.AuxInt = int32ToAuxInt(off) 18880 v.Aux = symToAux(sym) 
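// The flag state is known at compile time (less-than), so "greater than"
// is statically false: materialize a constant 0 and store its low byte.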
18881 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18882 v0.AuxInt = int32ToAuxInt(0) 18883 v.AddArg3(ptr, v0, mem) 18884 return true 18885 } 18886 // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem) 18887 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 18888 for { 18889 off := auxIntToInt32(v.AuxInt) 18890 sym := auxToSym(v.Aux) 18891 ptr := v_0 18892 if v_1.Op != OpAMD64FlagLT_UGT { 18893 break 18894 } 18895 mem := v_2 18896 v.reset(OpAMD64MOVBstore) 18897 v.AuxInt = int32ToAuxInt(off) 18898 v.Aux = symToAux(sym) 18899 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18900 v0.AuxInt = int32ToAuxInt(0) 18901 v.AddArg3(ptr, v0, mem) 18902 return true 18903 } 18904 // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem) 18905 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 18906 for { 18907 off := auxIntToInt32(v.AuxInt) 18908 sym := auxToSym(v.Aux) 18909 ptr := v_0 18910 if v_1.Op != OpAMD64FlagGT_ULT { 18911 break 18912 } 18913 mem := v_2 18914 v.reset(OpAMD64MOVBstore) 18915 v.AuxInt = int32ToAuxInt(off) 18916 v.Aux = symToAux(sym) 18917 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18918 v0.AuxInt = int32ToAuxInt(1) 18919 v.AddArg3(ptr, v0, mem) 18920 return true 18921 } 18922 // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem) 18923 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 18924 for { 18925 off := auxIntToInt32(v.AuxInt) 18926 sym := auxToSym(v.Aux) 18927 ptr := v_0 18928 if v_1.Op != OpAMD64FlagGT_UGT { 18929 break 18930 } 18931 mem := v_2 18932 v.reset(OpAMD64MOVBstore) 18933 v.AuxInt = int32ToAuxInt(off) 18934 v.Aux = symToAux(sym) 18935 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 18936 v0.AuxInt = int32ToAuxInt(1) 18937 v.AddArg3(ptr, v0, mem) 18938 return true 18939 } 18940 return false 18941 } 18942 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool { 18943 v_0 := v.Args[0] 18944 // match: (SETL (InvertFlags x)) 18945 // result: (SETG x) 18946 for { 18947 if v_0.Op != OpAMD64InvertFlags { 18948 break 18949 } 18950 x := v_0.Args[0] 18951 v.reset(OpAMD64SETG) 18952 v.AddArg(x) 18953 return true 18954 } 18955 // match: (SETL (FlagEQ)) 18956 // result: (MOVLconst [0]) 18957 for { 18958 if v_0.Op != OpAMD64FlagEQ { 18959 break 18960 } 18961 v.reset(OpAMD64MOVLconst) 18962 v.AuxInt = int32ToAuxInt(0) 18963 return true 18964 } 18965 // match: (SETL (FlagLT_ULT)) 18966 // result: (MOVLconst [1]) 18967 for { 18968 if v_0.Op != OpAMD64FlagLT_ULT { 18969 break 18970 } 18971 v.reset(OpAMD64MOVLconst) 18972 v.AuxInt = int32ToAuxInt(1) 18973 return true 18974 } 18975 // match: (SETL (FlagLT_UGT)) 18976 // result: (MOVLconst [1]) 18977 for { 18978 if v_0.Op != OpAMD64FlagLT_UGT { 18979 break 18980 } 18981 v.reset(OpAMD64MOVLconst) 18982 v.AuxInt = int32ToAuxInt(1) 18983 return true 18984 } 18985 // match: (SETL (FlagGT_ULT)) 18986 // result: (MOVLconst [0]) 18987 for { 18988 if v_0.Op != OpAMD64FlagGT_ULT { 18989 break 18990 } 18991 v.reset(OpAMD64MOVLconst) 18992 v.AuxInt = int32ToAuxInt(0) 18993 return true 18994 } 18995 // match: (SETL (FlagGT_UGT)) 18996 // result: (MOVLconst [0]) 18997 for { 18998 if v_0.Op != OpAMD64FlagGT_UGT { 18999 break 19000 } 19001 v.reset(OpAMD64MOVLconst) 19002 v.AuxInt = int32ToAuxInt(0) 19003 return true 19004 } 19005 return false 19006 } 19007 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool { 19008 v_0 := v.Args[0] 19009 // match: (SETLE (InvertFlags x)) 19010 // result: (SETGE x) 19011 for { 19012 if v_0.Op != OpAMD64InvertFlags { 19013 break 
19014 } 19015 x := v_0.Args[0] 19016 v.reset(OpAMD64SETGE) 19017 v.AddArg(x) 19018 return true 19019 } 19020 // match: (SETLE (FlagEQ)) 19021 // result: (MOVLconst [1]) 19022 for { 19023 if v_0.Op != OpAMD64FlagEQ { 19024 break 19025 } 19026 v.reset(OpAMD64MOVLconst) 19027 v.AuxInt = int32ToAuxInt(1) 19028 return true 19029 } 19030 // match: (SETLE (FlagLT_ULT)) 19031 // result: (MOVLconst [1]) 19032 for { 19033 if v_0.Op != OpAMD64FlagLT_ULT { 19034 break 19035 } 19036 v.reset(OpAMD64MOVLconst) 19037 v.AuxInt = int32ToAuxInt(1) 19038 return true 19039 } 19040 // match: (SETLE (FlagLT_UGT)) 19041 // result: (MOVLconst [1]) 19042 for { 19043 if v_0.Op != OpAMD64FlagLT_UGT { 19044 break 19045 } 19046 v.reset(OpAMD64MOVLconst) 19047 v.AuxInt = int32ToAuxInt(1) 19048 return true 19049 } 19050 // match: (SETLE (FlagGT_ULT)) 19051 // result: (MOVLconst [0]) 19052 for { 19053 if v_0.Op != OpAMD64FlagGT_ULT { 19054 break 19055 } 19056 v.reset(OpAMD64MOVLconst) 19057 v.AuxInt = int32ToAuxInt(0) 19058 return true 19059 } 19060 // match: (SETLE (FlagGT_UGT)) 19061 // result: (MOVLconst [0]) 19062 for { 19063 if v_0.Op != OpAMD64FlagGT_UGT { 19064 break 19065 } 19066 v.reset(OpAMD64MOVLconst) 19067 v.AuxInt = int32ToAuxInt(0) 19068 return true 19069 } 19070 return false 19071 } 19072 func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { 19073 v_2 := v.Args[2] 19074 v_1 := v.Args[1] 19075 v_0 := v.Args[0] 19076 b := v.Block 19077 typ := &b.Func.Config.Types 19078 // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem) 19079 // result: (SETGEstore [off] {sym} ptr x mem) 19080 for { 19081 off := auxIntToInt32(v.AuxInt) 19082 sym := auxToSym(v.Aux) 19083 ptr := v_0 19084 if v_1.Op != OpAMD64InvertFlags { 19085 break 19086 } 19087 x := v_1.Args[0] 19088 mem := v_2 19089 v.reset(OpAMD64SETGEstore) 19090 v.AuxInt = int32ToAuxInt(off) 19091 v.Aux = symToAux(sym) 19092 v.AddArg3(ptr, x, mem) 19093 return true 19094 } 19095 // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem) 19096 // cond: is32Bit(int64(off1)+int64(off2)) 19097 // result: (SETLEstore [off1+off2] {sym} base val mem) 19098 for { 19099 off1 := auxIntToInt32(v.AuxInt) 19100 sym := auxToSym(v.Aux) 19101 if v_0.Op != OpAMD64ADDQconst { 19102 break 19103 } 19104 off2 := auxIntToInt32(v_0.AuxInt) 19105 base := v_0.Args[0] 19106 val := v_1 19107 mem := v_2 19108 if !(is32Bit(int64(off1) + int64(off2))) { 19109 break 19110 } 19111 v.reset(OpAMD64SETLEstore) 19112 v.AuxInt = int32ToAuxInt(off1 + off2) 19113 v.Aux = symToAux(sym) 19114 v.AddArg3(base, val, mem) 19115 return true 19116 } 19117 // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 19118 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 19119 // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 19120 for { 19121 off1 := auxIntToInt32(v.AuxInt) 19122 sym1 := auxToSym(v.Aux) 19123 if v_0.Op != OpAMD64LEAQ { 19124 break 19125 } 19126 off2 := auxIntToInt32(v_0.AuxInt) 19127 sym2 := auxToSym(v_0.Aux) 19128 base := v_0.Args[0] 19129 val := v_1 19130 mem := v_2 19131 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 19132 break 19133 } 19134 v.reset(OpAMD64SETLEstore) 19135 v.AuxInt = int32ToAuxInt(off1 + off2) 19136 v.Aux = symToAux(mergeSym(sym1, sym2)) 19137 v.AddArg3(base, val, mem) 19138 return true 19139 } 19140 // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem) 19141 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 19142 for { 19143 off := auxIntToInt32(v.AuxInt) 19144 sym := 
auxToSym(v.Aux) 19145 ptr := v_0 19146 if v_1.Op != OpAMD64FlagEQ { 19147 break 19148 } 19149 mem := v_2 19150 v.reset(OpAMD64MOVBstore) 19151 v.AuxInt = int32ToAuxInt(off) 19152 v.Aux = symToAux(sym) 19153 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19154 v0.AuxInt = int32ToAuxInt(1) 19155 v.AddArg3(ptr, v0, mem) 19156 return true 19157 } 19158 // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) 19159 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 19160 for { 19161 off := auxIntToInt32(v.AuxInt) 19162 sym := auxToSym(v.Aux) 19163 ptr := v_0 19164 if v_1.Op != OpAMD64FlagLT_ULT { 19165 break 19166 } 19167 mem := v_2 19168 v.reset(OpAMD64MOVBstore) 19169 v.AuxInt = int32ToAuxInt(off) 19170 v.Aux = symToAux(sym) 19171 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19172 v0.AuxInt = int32ToAuxInt(1) 19173 v.AddArg3(ptr, v0, mem) 19174 return true 19175 } 19176 // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) 19177 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 19178 for { 19179 off := auxIntToInt32(v.AuxInt) 19180 sym := auxToSym(v.Aux) 19181 ptr := v_0 19182 if v_1.Op != OpAMD64FlagLT_UGT { 19183 break 19184 } 19185 mem := v_2 19186 v.reset(OpAMD64MOVBstore) 19187 v.AuxInt = int32ToAuxInt(off) 19188 v.Aux = symToAux(sym) 19189 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19190 v0.AuxInt = int32ToAuxInt(1) 19191 v.AddArg3(ptr, v0, mem) 19192 return true 19193 } 19194 // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) 19195 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 19196 for { 19197 off := auxIntToInt32(v.AuxInt) 19198 sym := auxToSym(v.Aux) 19199 ptr := v_0 19200 if v_1.Op != OpAMD64FlagGT_ULT { 19201 break 19202 } 19203 mem := v_2 19204 v.reset(OpAMD64MOVBstore) 19205 v.AuxInt = int32ToAuxInt(off) 19206 v.Aux = symToAux(sym) 19207 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19208 v0.AuxInt = int32ToAuxInt(0) 19209 v.AddArg3(ptr, v0, mem) 19210 return true 19211 } 19212 // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) 19213 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 19214 for { 19215 off := auxIntToInt32(v.AuxInt) 19216 sym := auxToSym(v.Aux) 19217 ptr := v_0 19218 if v_1.Op != OpAMD64FlagGT_UGT { 19219 break 19220 } 19221 mem := v_2 19222 v.reset(OpAMD64MOVBstore) 19223 v.AuxInt = int32ToAuxInt(off) 19224 v.Aux = symToAux(sym) 19225 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19226 v0.AuxInt = int32ToAuxInt(0) 19227 v.AddArg3(ptr, v0, mem) 19228 return true 19229 } 19230 return false 19231 } 19232 func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { 19233 v_2 := v.Args[2] 19234 v_1 := v.Args[1] 19235 v_0 := v.Args[0] 19236 b := v.Block 19237 typ := &b.Func.Config.Types 19238 // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem) 19239 // result: (SETGstore [off] {sym} ptr x mem) 19240 for { 19241 off := auxIntToInt32(v.AuxInt) 19242 sym := auxToSym(v.Aux) 19243 ptr := v_0 19244 if v_1.Op != OpAMD64InvertFlags { 19245 break 19246 } 19247 x := v_1.Args[0] 19248 mem := v_2 19249 v.reset(OpAMD64SETGstore) 19250 v.AuxInt = int32ToAuxInt(off) 19251 v.Aux = symToAux(sym) 19252 v.AddArg3(ptr, x, mem) 19253 return true 19254 } 19255 // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem) 19256 // cond: is32Bit(int64(off1)+int64(off2)) 19257 // result: (SETLstore [off1+off2] {sym} base val mem) 19258 for { 19259 off1 := auxIntToInt32(v.AuxInt) 19260 sym := auxToSym(v.Aux) 19261 if v_0.Op != OpAMD64ADDQconst { 
19262 break 19263 } 19264 off2 := auxIntToInt32(v_0.AuxInt) 19265 base := v_0.Args[0] 19266 val := v_1 19267 mem := v_2 19268 if !(is32Bit(int64(off1) + int64(off2))) { 19269 break 19270 } 19271 v.reset(OpAMD64SETLstore) 19272 v.AuxInt = int32ToAuxInt(off1 + off2) 19273 v.Aux = symToAux(sym) 19274 v.AddArg3(base, val, mem) 19275 return true 19276 } 19277 // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 19278 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 19279 // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 19280 for { 19281 off1 := auxIntToInt32(v.AuxInt) 19282 sym1 := auxToSym(v.Aux) 19283 if v_0.Op != OpAMD64LEAQ { 19284 break 19285 } 19286 off2 := auxIntToInt32(v_0.AuxInt) 19287 sym2 := auxToSym(v_0.Aux) 19288 base := v_0.Args[0] 19289 val := v_1 19290 mem := v_2 19291 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 19292 break 19293 } 19294 v.reset(OpAMD64SETLstore) 19295 v.AuxInt = int32ToAuxInt(off1 + off2) 19296 v.Aux = symToAux(mergeSym(sym1, sym2)) 19297 v.AddArg3(base, val, mem) 19298 return true 19299 } 19300 // match: (SETLstore [off] {sym} ptr (FlagEQ) mem) 19301 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 19302 for { 19303 off := auxIntToInt32(v.AuxInt) 19304 sym := auxToSym(v.Aux) 19305 ptr := v_0 19306 if v_1.Op != OpAMD64FlagEQ { 19307 break 19308 } 19309 mem := v_2 19310 v.reset(OpAMD64MOVBstore) 19311 v.AuxInt = int32ToAuxInt(off) 19312 v.Aux = symToAux(sym) 19313 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19314 v0.AuxInt = int32ToAuxInt(0) 19315 v.AddArg3(ptr, v0, mem) 19316 return true 19317 } 19318 // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem) 19319 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 19320 for { 19321 off := auxIntToInt32(v.AuxInt) 19322 sym := auxToSym(v.Aux) 19323 ptr := v_0 19324 if v_1.Op != OpAMD64FlagLT_ULT { 19325 break 19326 } 19327 mem := v_2 19328 v.reset(OpAMD64MOVBstore) 19329 v.AuxInt = int32ToAuxInt(off) 19330 v.Aux = symToAux(sym) 19331 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19332 v0.AuxInt = int32ToAuxInt(1) 19333 v.AddArg3(ptr, v0, mem) 19334 return true 19335 } 19336 // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem) 19337 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 19338 for { 19339 off := auxIntToInt32(v.AuxInt) 19340 sym := auxToSym(v.Aux) 19341 ptr := v_0 19342 if v_1.Op != OpAMD64FlagLT_UGT { 19343 break 19344 } 19345 mem := v_2 19346 v.reset(OpAMD64MOVBstore) 19347 v.AuxInt = int32ToAuxInt(off) 19348 v.Aux = symToAux(sym) 19349 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19350 v0.AuxInt = int32ToAuxInt(1) 19351 v.AddArg3(ptr, v0, mem) 19352 return true 19353 } 19354 // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem) 19355 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 19356 for { 19357 off := auxIntToInt32(v.AuxInt) 19358 sym := auxToSym(v.Aux) 19359 ptr := v_0 19360 if v_1.Op != OpAMD64FlagGT_ULT { 19361 break 19362 } 19363 mem := v_2 19364 v.reset(OpAMD64MOVBstore) 19365 v.AuxInt = int32ToAuxInt(off) 19366 v.Aux = symToAux(sym) 19367 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19368 v0.AuxInt = int32ToAuxInt(0) 19369 v.AddArg3(ptr, v0, mem) 19370 return true 19371 } 19372 // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem) 19373 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 19374 for { 19375 off := auxIntToInt32(v.AuxInt) 19376 sym := auxToSym(v.Aux) 19377 
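// Symmetrically, a statically known greater-than flag can never satisfy
// "less than", so this rule stores a constant 0 byte as well.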
ptr := v_0 19378 if v_1.Op != OpAMD64FlagGT_UGT { 19379 break 19380 } 19381 mem := v_2 19382 v.reset(OpAMD64MOVBstore) 19383 v.AuxInt = int32ToAuxInt(off) 19384 v.Aux = symToAux(sym) 19385 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 19386 v0.AuxInt = int32ToAuxInt(0) 19387 v.AddArg3(ptr, v0, mem) 19388 return true 19389 } 19390 return false 19391 } 19392 func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { 19393 v_0 := v.Args[0] 19394 b := v.Block 19395 // match: (SETNE (TESTBconst [1] x)) 19396 // result: (ANDLconst [1] x) 19397 for { 19398 if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 { 19399 break 19400 } 19401 x := v_0.Args[0] 19402 v.reset(OpAMD64ANDLconst) 19403 v.AuxInt = int32ToAuxInt(1) 19404 v.AddArg(x) 19405 return true 19406 } 19407 // match: (SETNE (TESTWconst [1] x)) 19408 // result: (ANDLconst [1] x) 19409 for { 19410 if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 { 19411 break 19412 } 19413 x := v_0.Args[0] 19414 v.reset(OpAMD64ANDLconst) 19415 v.AuxInt = int32ToAuxInt(1) 19416 v.AddArg(x) 19417 return true 19418 } 19419 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 19420 // result: (SETB (BTL x y)) 19421 for { 19422 if v_0.Op != OpAMD64TESTL { 19423 break 19424 } 19425 _ = v_0.Args[1] 19426 v_0_0 := v_0.Args[0] 19427 v_0_1 := v_0.Args[1] 19428 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19429 if v_0_0.Op != OpAMD64SHLL { 19430 continue 19431 } 19432 x := v_0_0.Args[1] 19433 v_0_0_0 := v_0_0.Args[0] 19434 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { 19435 continue 19436 } 19437 y := v_0_1 19438 v.reset(OpAMD64SETB) 19439 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 19440 v0.AddArg2(x, y) 19441 v.AddArg(v0) 19442 return true 19443 } 19444 break 19445 } 19446 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 19447 // result: (SETB (BTQ x y)) 19448 for { 19449 if v_0.Op != OpAMD64TESTQ { 19450 break 19451 } 19452 _ = v_0.Args[1] 19453 v_0_0 := v_0.Args[0] 19454 v_0_1 := v_0.Args[1] 19455 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19456 if v_0_0.Op != OpAMD64SHLQ { 19457 continue 19458 } 19459 x := v_0_0.Args[1] 19460 v_0_0_0 := v_0_0.Args[0] 19461 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { 19462 continue 19463 } 19464 y := v_0_1 19465 v.reset(OpAMD64SETB) 19466 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 19467 v0.AddArg2(x, y) 19468 v.AddArg(v0) 19469 return true 19470 } 19471 break 19472 } 19473 // match: (SETNE (TESTLconst [c] x)) 19474 // cond: isUint32PowerOfTwo(int64(c)) 19475 // result: (SETB (BTLconst [int8(log32(c))] x)) 19476 for { 19477 if v_0.Op != OpAMD64TESTLconst { 19478 break 19479 } 19480 c := auxIntToInt32(v_0.AuxInt) 19481 x := v_0.Args[0] 19482 if !(isUint32PowerOfTwo(int64(c))) { 19483 break 19484 } 19485 v.reset(OpAMD64SETB) 19486 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 19487 v0.AuxInt = int8ToAuxInt(int8(log32(c))) 19488 v0.AddArg(x) 19489 v.AddArg(v0) 19490 return true 19491 } 19492 // match: (SETNE (TESTQconst [c] x)) 19493 // cond: isUint64PowerOfTwo(int64(c)) 19494 // result: (SETB (BTQconst [int8(log32(c))] x)) 19495 for { 19496 if v_0.Op != OpAMD64TESTQconst { 19497 break 19498 } 19499 c := auxIntToInt32(v_0.AuxInt) 19500 x := v_0.Args[0] 19501 if !(isUint64PowerOfTwo(int64(c))) { 19502 break 19503 } 19504 v.reset(OpAMD64SETB) 19505 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 19506 v0.AuxInt = int8ToAuxInt(int8(log32(c))) 19507 
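// A TEST against a power-of-two constant inspects exactly one bit, and
// log32 recovers that bit's index, so the whole test collapses to a single
// BTQconst; SETB then reads the carry flag the bit test produces.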
v0.AddArg(x) 19508 v.AddArg(v0) 19509 return true 19510 } 19511 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 19512 // cond: isUint64PowerOfTwo(c) 19513 // result: (SETB (BTQconst [int8(log64(c))] x)) 19514 for { 19515 if v_0.Op != OpAMD64TESTQ { 19516 break 19517 } 19518 _ = v_0.Args[1] 19519 v_0_0 := v_0.Args[0] 19520 v_0_1 := v_0.Args[1] 19521 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19522 if v_0_0.Op != OpAMD64MOVQconst { 19523 continue 19524 } 19525 c := auxIntToInt64(v_0_0.AuxInt) 19526 x := v_0_1 19527 if !(isUint64PowerOfTwo(c)) { 19528 continue 19529 } 19530 v.reset(OpAMD64SETB) 19531 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 19532 v0.AuxInt = int8ToAuxInt(int8(log64(c))) 19533 v0.AddArg(x) 19534 v.AddArg(v0) 19535 return true 19536 } 19537 break 19538 } 19539 // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _))) 19540 // result: (SETEQ (CMPLconst [0] s)) 19541 for { 19542 if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 { 19543 break 19544 } 19545 s := v_0.Args[0] 19546 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { 19547 break 19548 } 19549 v.reset(OpAMD64SETEQ) 19550 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 19551 v0.AuxInt = int32ToAuxInt(0) 19552 v0.AddArg(s) 19553 v.AddArg(v0) 19554 return true 19555 } 19556 // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _))) 19557 // result: (SETEQ (CMPQconst [0] s)) 19558 for { 19559 if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 { 19560 break 19561 } 19562 s := v_0.Args[0] 19563 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { 19564 break 19565 } 19566 v.reset(OpAMD64SETEQ) 19567 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 19568 v0.AuxInt = int32ToAuxInt(0) 19569 v0.AddArg(s) 19570 v.AddArg(v0) 19571 return true 19572 } 19573 // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) 19574 // cond: z1==z2 19575 // result: (SETB (BTQconst [63] x)) 19576 for { 19577 if v_0.Op != OpAMD64TESTQ { 19578 break 19579 } 19580 _ = v_0.Args[1] 19581 v_0_0 := v_0.Args[0] 19582 v_0_1 := v_0.Args[1] 19583 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19584 z1 := v_0_0 19585 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { 19586 continue 19587 } 19588 z1_0 := z1.Args[0] 19589 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { 19590 continue 19591 } 19592 x := z1_0.Args[0] 19593 z2 := v_0_1 19594 if !(z1 == z2) { 19595 continue 19596 } 19597 v.reset(OpAMD64SETB) 19598 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 19599 v0.AuxInt = int8ToAuxInt(63) 19600 v0.AddArg(x) 19601 v.AddArg(v0) 19602 return true 19603 } 19604 break 19605 } 19606 // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) 19607 // cond: z1==z2 19608 // result: (SETB (BTQconst [31] x)) 19609 for { 19610 if v_0.Op != OpAMD64TESTL { 19611 break 19612 } 19613 _ = v_0.Args[1] 19614 v_0_0 := v_0.Args[0] 19615 v_0_1 := v_0.Args[1] 19616 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19617 z1 := v_0_0 19618 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { 19619 continue 19620 } 19621 z1_0 := z1.Args[0] 19622 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { 19623 continue 19624 } 19625 x := z1_0.Args[0] 19626 z2 := v_0_1 19627 if !(z1 == z2) { 19628 continue 19629 } 19630 v.reset(OpAMD64SETB) 19631 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 19632 v0.AuxInt = int8ToAuxInt(31) 19633 v0.AddArg(x) 19634 
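// v0 is now (BTQconst [31] x): the SHR/SHL pair in the matched pattern
// isolates bit 31 of x, so testing that result for nonzero is the same as
// testing the single bit directly.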
v.AddArg(v0) 19635 return true 19636 } 19637 break 19638 } 19639 // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) 19640 // cond: z1==z2 19641 // result: (SETB (BTQconst [0] x)) 19642 for { 19643 if v_0.Op != OpAMD64TESTQ { 19644 break 19645 } 19646 _ = v_0.Args[1] 19647 v_0_0 := v_0.Args[0] 19648 v_0_1 := v_0.Args[1] 19649 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19650 z1 := v_0_0 19651 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 19652 continue 19653 } 19654 z1_0 := z1.Args[0] 19655 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { 19656 continue 19657 } 19658 x := z1_0.Args[0] 19659 z2 := v_0_1 19660 if !(z1 == z2) { 19661 continue 19662 } 19663 v.reset(OpAMD64SETB) 19664 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 19665 v0.AuxInt = int8ToAuxInt(0) 19666 v0.AddArg(x) 19667 v.AddArg(v0) 19668 return true 19669 } 19670 break 19671 } 19672 // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) 19673 // cond: z1==z2 19674 // result: (SETB (BTLconst [0] x)) 19675 for { 19676 if v_0.Op != OpAMD64TESTL { 19677 break 19678 } 19679 _ = v_0.Args[1] 19680 v_0_0 := v_0.Args[0] 19681 v_0_1 := v_0.Args[1] 19682 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19683 z1 := v_0_0 19684 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 19685 continue 19686 } 19687 z1_0 := z1.Args[0] 19688 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { 19689 continue 19690 } 19691 x := z1_0.Args[0] 19692 z2 := v_0_1 19693 if !(z1 == z2) { 19694 continue 19695 } 19696 v.reset(OpAMD64SETB) 19697 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 19698 v0.AuxInt = int8ToAuxInt(0) 19699 v0.AddArg(x) 19700 v.AddArg(v0) 19701 return true 19702 } 19703 break 19704 } 19705 // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2)) 19706 // cond: z1==z2 19707 // result: (SETB (BTQconst [63] x)) 19708 for { 19709 if v_0.Op != OpAMD64TESTQ { 19710 break 19711 } 19712 _ = v_0.Args[1] 19713 v_0_0 := v_0.Args[0] 19714 v_0_1 := v_0.Args[1] 19715 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19716 z1 := v_0_0 19717 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 19718 continue 19719 } 19720 x := z1.Args[0] 19721 z2 := v_0_1 19722 if !(z1 == z2) { 19723 continue 19724 } 19725 v.reset(OpAMD64SETB) 19726 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 19727 v0.AuxInt = int8ToAuxInt(63) 19728 v0.AddArg(x) 19729 v.AddArg(v0) 19730 return true 19731 } 19732 break 19733 } 19734 // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2)) 19735 // cond: z1==z2 19736 // result: (SETB (BTLconst [31] x)) 19737 for { 19738 if v_0.Op != OpAMD64TESTL { 19739 break 19740 } 19741 _ = v_0.Args[1] 19742 v_0_0 := v_0.Args[0] 19743 v_0_1 := v_0.Args[1] 19744 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19745 z1 := v_0_0 19746 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 19747 continue 19748 } 19749 x := z1.Args[0] 19750 z2 := v_0_1 19751 if !(z1 == z2) { 19752 continue 19753 } 19754 v.reset(OpAMD64SETB) 19755 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 19756 v0.AuxInt = int8ToAuxInt(31) 19757 v0.AddArg(x) 19758 v.AddArg(v0) 19759 return true 19760 } 19761 break 19762 } 19763 // match: (SETNE (InvertFlags x)) 19764 // result: (SETNE x) 19765 for { 19766 if v_0.Op != OpAMD64InvertFlags { 19767 break 19768 } 19769 x := v_0.Args[0] 19770 v.reset(OpAMD64SETNE) 19771 v.AddArg(x) 19772 return true 19773 } 19774 // 
match: (SETNE (FlagEQ)) 19775 // result: (MOVLconst [0]) 19776 for { 19777 if v_0.Op != OpAMD64FlagEQ { 19778 break 19779 } 19780 v.reset(OpAMD64MOVLconst) 19781 v.AuxInt = int32ToAuxInt(0) 19782 return true 19783 } 19784 // match: (SETNE (FlagLT_ULT)) 19785 // result: (MOVLconst [1]) 19786 for { 19787 if v_0.Op != OpAMD64FlagLT_ULT { 19788 break 19789 } 19790 v.reset(OpAMD64MOVLconst) 19791 v.AuxInt = int32ToAuxInt(1) 19792 return true 19793 } 19794 // match: (SETNE (FlagLT_UGT)) 19795 // result: (MOVLconst [1]) 19796 for { 19797 if v_0.Op != OpAMD64FlagLT_UGT { 19798 break 19799 } 19800 v.reset(OpAMD64MOVLconst) 19801 v.AuxInt = int32ToAuxInt(1) 19802 return true 19803 } 19804 // match: (SETNE (FlagGT_ULT)) 19805 // result: (MOVLconst [1]) 19806 for { 19807 if v_0.Op != OpAMD64FlagGT_ULT { 19808 break 19809 } 19810 v.reset(OpAMD64MOVLconst) 19811 v.AuxInt = int32ToAuxInt(1) 19812 return true 19813 } 19814 // match: (SETNE (FlagGT_UGT)) 19815 // result: (MOVLconst [1]) 19816 for { 19817 if v_0.Op != OpAMD64FlagGT_UGT { 19818 break 19819 } 19820 v.reset(OpAMD64MOVLconst) 19821 v.AuxInt = int32ToAuxInt(1) 19822 return true 19823 } 19824 // match: (SETNE (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) 19825 // result: (SETNE (Select1 <types.TypeFlags> blsr)) 19826 for { 19827 if v_0.Op != OpAMD64TESTQ { 19828 break 19829 } 19830 _ = v_0.Args[1] 19831 v_0_0 := v_0.Args[0] 19832 v_0_1 := v_0.Args[1] 19833 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19834 s := v_0_0 19835 if s.Op != OpSelect0 { 19836 continue 19837 } 19838 blsr := s.Args[0] 19839 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 { 19840 continue 19841 } 19842 v.reset(OpAMD64SETNE) 19843 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 19844 v0.AddArg(blsr) 19845 v.AddArg(v0) 19846 return true 19847 } 19848 break 19849 } 19850 // match: (SETNE (TESTL s:(Select0 blsr:(BLSRL _)) s)) 19851 // result: (SETNE (Select1 <types.TypeFlags> blsr)) 19852 for { 19853 if v_0.Op != OpAMD64TESTL { 19854 break 19855 } 19856 _ = v_0.Args[1] 19857 v_0_0 := v_0.Args[0] 19858 v_0_1 := v_0.Args[1] 19859 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { 19860 s := v_0_0 19861 if s.Op != OpSelect0 { 19862 continue 19863 } 19864 blsr := s.Args[0] 19865 if blsr.Op != OpAMD64BLSRL || s != v_0_1 { 19866 continue 19867 } 19868 v.reset(OpAMD64SETNE) 19869 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 19870 v0.AddArg(blsr) 19871 v.AddArg(v0) 19872 return true 19873 } 19874 break 19875 } 19876 return false 19877 } 19878 func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { 19879 v_2 := v.Args[2] 19880 v_1 := v.Args[1] 19881 v_0 := v.Args[0] 19882 b := v.Block 19883 typ := &b.Func.Config.Types 19884 // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) 19885 // result: (SETBstore [off] {sym} ptr (BTL x y) mem) 19886 for { 19887 off := auxIntToInt32(v.AuxInt) 19888 sym := auxToSym(v.Aux) 19889 ptr := v_0 19890 if v_1.Op != OpAMD64TESTL { 19891 break 19892 } 19893 _ = v_1.Args[1] 19894 v_1_0 := v_1.Args[0] 19895 v_1_1 := v_1.Args[1] 19896 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 19897 if v_1_0.Op != OpAMD64SHLL { 19898 continue 19899 } 19900 x := v_1_0.Args[1] 19901 v_1_0_0 := v_1_0.Args[0] 19902 if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 { 19903 continue 19904 } 19905 y := v_1_1 19906 mem := v_2 19907 v.reset(OpAMD64SETBstore) 19908 v.AuxInt = int32ToAuxInt(off) 19909 v.Aux = symToAux(sym) 19910 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 
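// (BTL x y) tests bit x (the variable shift count) of y and leaves the
// result in CF, replacing the 1<<x mask-and-TEST sequence; SETBstore then
// stores CF directly.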
19911 v0.AddArg2(x, y) 19912 v.AddArg3(ptr, v0, mem) 19913 return true 19914 } 19915 break 19916 } 19917 // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) 19918 // result: (SETBstore [off] {sym} ptr (BTQ x y) mem) 19919 for { 19920 off := auxIntToInt32(v.AuxInt) 19921 sym := auxToSym(v.Aux) 19922 ptr := v_0 19923 if v_1.Op != OpAMD64TESTQ { 19924 break 19925 } 19926 _ = v_1.Args[1] 19927 v_1_0 := v_1.Args[0] 19928 v_1_1 := v_1.Args[1] 19929 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 19930 if v_1_0.Op != OpAMD64SHLQ { 19931 continue 19932 } 19933 x := v_1_0.Args[1] 19934 v_1_0_0 := v_1_0.Args[0] 19935 if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { 19936 continue 19937 } 19938 y := v_1_1 19939 mem := v_2 19940 v.reset(OpAMD64SETBstore) 19941 v.AuxInt = int32ToAuxInt(off) 19942 v.Aux = symToAux(sym) 19943 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 19944 v0.AddArg2(x, y) 19945 v.AddArg3(ptr, v0, mem) 19946 return true 19947 } 19948 break 19949 } 19950 // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem) 19951 // cond: isUint32PowerOfTwo(int64(c)) 19952 // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) 19953 for { 19954 off := auxIntToInt32(v.AuxInt) 19955 sym := auxToSym(v.Aux) 19956 ptr := v_0 19957 if v_1.Op != OpAMD64TESTLconst { 19958 break 19959 } 19960 c := auxIntToInt32(v_1.AuxInt) 19961 x := v_1.Args[0] 19962 mem := v_2 19963 if !(isUint32PowerOfTwo(int64(c))) { 19964 break 19965 } 19966 v.reset(OpAMD64SETBstore) 19967 v.AuxInt = int32ToAuxInt(off) 19968 v.Aux = symToAux(sym) 19969 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 19970 v0.AuxInt = int8ToAuxInt(int8(log32(c))) 19971 v0.AddArg(x) 19972 v.AddArg3(ptr, v0, mem) 19973 return true 19974 } 19975 // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem) 19976 // cond: isUint64PowerOfTwo(int64(c)) 19977 // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) 19978 for { 19979 off := auxIntToInt32(v.AuxInt) 19980 sym := auxToSym(v.Aux) 19981 ptr := v_0 19982 if v_1.Op != OpAMD64TESTQconst { 19983 break 19984 } 19985 c := auxIntToInt32(v_1.AuxInt) 19986 x := v_1.Args[0] 19987 mem := v_2 19988 if !(isUint64PowerOfTwo(int64(c))) { 19989 break 19990 } 19991 v.reset(OpAMD64SETBstore) 19992 v.AuxInt = int32ToAuxInt(off) 19993 v.Aux = symToAux(sym) 19994 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 19995 v0.AuxInt = int8ToAuxInt(int8(log32(c))) 19996 v0.AddArg(x) 19997 v.AddArg3(ptr, v0, mem) 19998 return true 19999 } 20000 // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) 20001 // cond: isUint64PowerOfTwo(c) 20002 // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem) 20003 for { 20004 off := auxIntToInt32(v.AuxInt) 20005 sym := auxToSym(v.Aux) 20006 ptr := v_0 20007 if v_1.Op != OpAMD64TESTQ { 20008 break 20009 } 20010 _ = v_1.Args[1] 20011 v_1_0 := v_1.Args[0] 20012 v_1_1 := v_1.Args[1] 20013 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 20014 if v_1_0.Op != OpAMD64MOVQconst { 20015 continue 20016 } 20017 c := auxIntToInt64(v_1_0.AuxInt) 20018 x := v_1_1 20019 mem := v_2 20020 if !(isUint64PowerOfTwo(c)) { 20021 continue 20022 } 20023 v.reset(OpAMD64SETBstore) 20024 v.AuxInt = int32ToAuxInt(off) 20025 v.Aux = symToAux(sym) 20026 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 20027 v0.AuxInt = int8ToAuxInt(int8(log64(c))) 20028 v0.AddArg(x) 20029 v.AddArg3(ptr, v0, mem) 20030 return true 20031 } 20032 
break 20033 } 20034 // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) 20035 // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem) 20036 for { 20037 off := auxIntToInt32(v.AuxInt) 20038 sym := auxToSym(v.Aux) 20039 ptr := v_0 20040 if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 { 20041 break 20042 } 20043 s := v_1.Args[0] 20044 if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { 20045 break 20046 } 20047 mem := v_2 20048 v.reset(OpAMD64SETEQstore) 20049 v.AuxInt = int32ToAuxInt(off) 20050 v.Aux = symToAux(sym) 20051 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 20052 v0.AuxInt = int32ToAuxInt(0) 20053 v0.AddArg(s) 20054 v.AddArg3(ptr, v0, mem) 20055 return true 20056 } 20057 // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) 20058 // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem) 20059 for { 20060 off := auxIntToInt32(v.AuxInt) 20061 sym := auxToSym(v.Aux) 20062 ptr := v_0 20063 if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 { 20064 break 20065 } 20066 s := v_1.Args[0] 20067 if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { 20068 break 20069 } 20070 mem := v_2 20071 v.reset(OpAMD64SETEQstore) 20072 v.AuxInt = int32ToAuxInt(off) 20073 v.Aux = symToAux(sym) 20074 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 20075 v0.AuxInt = int32ToAuxInt(0) 20076 v0.AddArg(s) 20077 v.AddArg3(ptr, v0, mem) 20078 return true 20079 } 20080 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) 20081 // cond: z1==z2 20082 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) 20083 for { 20084 off := auxIntToInt32(v.AuxInt) 20085 sym := auxToSym(v.Aux) 20086 ptr := v_0 20087 if v_1.Op != OpAMD64TESTQ { 20088 break 20089 } 20090 _ = v_1.Args[1] 20091 v_1_0 := v_1.Args[0] 20092 v_1_1 := v_1.Args[1] 20093 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 20094 z1 := v_1_0 20095 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { 20096 continue 20097 } 20098 z1_0 := z1.Args[0] 20099 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { 20100 continue 20101 } 20102 x := z1_0.Args[0] 20103 z2 := v_1_1 20104 mem := v_2 20105 if !(z1 == z2) { 20106 continue 20107 } 20108 v.reset(OpAMD64SETBstore) 20109 v.AuxInt = int32ToAuxInt(off) 20110 v.Aux = symToAux(sym) 20111 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 20112 v0.AuxInt = int8ToAuxInt(63) 20113 v0.AddArg(x) 20114 v.AddArg3(ptr, v0, mem) 20115 return true 20116 } 20117 break 20118 } 20119 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) 20120 // cond: z1==z2 20121 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) 20122 for { 20123 off := auxIntToInt32(v.AuxInt) 20124 sym := auxToSym(v.Aux) 20125 ptr := v_0 20126 if v_1.Op != OpAMD64TESTL { 20127 break 20128 } 20129 _ = v_1.Args[1] 20130 v_1_0 := v_1.Args[0] 20131 v_1_1 := v_1.Args[1] 20132 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 20133 z1 := v_1_0 20134 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { 20135 continue 20136 } 20137 z1_0 := z1.Args[0] 20138 if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 { 20139 continue 20140 } 20141 x := z1_0.Args[0] 20142 z2 := v_1_1 20143 mem := v_2 20144 if !(z1 == z2) { 20145 continue 20146 } 20147 v.reset(OpAMD64SETBstore) 20148 v.AuxInt = int32ToAuxInt(off) 20149 v.Aux = symToAux(sym) 20150 v0 := 
b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 20151 v0.AuxInt = int8ToAuxInt(31) 20152 v0.AddArg(x) 20153 v.AddArg3(ptr, v0, mem) 20154 return true 20155 } 20156 break 20157 } 20158 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) 20159 // cond: z1==z2 20160 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem) 20161 for { 20162 off := auxIntToInt32(v.AuxInt) 20163 sym := auxToSym(v.Aux) 20164 ptr := v_0 20165 if v_1.Op != OpAMD64TESTQ { 20166 break 20167 } 20168 _ = v_1.Args[1] 20169 v_1_0 := v_1.Args[0] 20170 v_1_1 := v_1.Args[1] 20171 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 20172 z1 := v_1_0 20173 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 20174 continue 20175 } 20176 z1_0 := z1.Args[0] 20177 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { 20178 continue 20179 } 20180 x := z1_0.Args[0] 20181 z2 := v_1_1 20182 mem := v_2 20183 if !(z1 == z2) { 20184 continue 20185 } 20186 v.reset(OpAMD64SETBstore) 20187 v.AuxInt = int32ToAuxInt(off) 20188 v.Aux = symToAux(sym) 20189 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 20190 v0.AuxInt = int8ToAuxInt(0) 20191 v0.AddArg(x) 20192 v.AddArg3(ptr, v0, mem) 20193 return true 20194 } 20195 break 20196 } 20197 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) 20198 // cond: z1==z2 20199 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem) 20200 for { 20201 off := auxIntToInt32(v.AuxInt) 20202 sym := auxToSym(v.Aux) 20203 ptr := v_0 20204 if v_1.Op != OpAMD64TESTL { 20205 break 20206 } 20207 _ = v_1.Args[1] 20208 v_1_0 := v_1.Args[0] 20209 v_1_1 := v_1.Args[1] 20210 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 20211 z1 := v_1_0 20212 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 20213 continue 20214 } 20215 z1_0 := z1.Args[0] 20216 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { 20217 continue 20218 } 20219 x := z1_0.Args[0] 20220 z2 := v_1_1 20221 mem := v_2 20222 if !(z1 == z2) { 20223 continue 20224 } 20225 v.reset(OpAMD64SETBstore) 20226 v.AuxInt = int32ToAuxInt(off) 20227 v.Aux = symToAux(sym) 20228 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 20229 v0.AuxInt = int8ToAuxInt(0) 20230 v0.AddArg(x) 20231 v.AddArg3(ptr, v0, mem) 20232 return true 20233 } 20234 break 20235 } 20236 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) 20237 // cond: z1==z2 20238 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) 20239 for { 20240 off := auxIntToInt32(v.AuxInt) 20241 sym := auxToSym(v.Aux) 20242 ptr := v_0 20243 if v_1.Op != OpAMD64TESTQ { 20244 break 20245 } 20246 _ = v_1.Args[1] 20247 v_1_0 := v_1.Args[0] 20248 v_1_1 := v_1.Args[1] 20249 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 20250 z1 := v_1_0 20251 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { 20252 continue 20253 } 20254 x := z1.Args[0] 20255 z2 := v_1_1 20256 mem := v_2 20257 if !(z1 == z2) { 20258 continue 20259 } 20260 v.reset(OpAMD64SETBstore) 20261 v.AuxInt = int32ToAuxInt(off) 20262 v.Aux = symToAux(sym) 20263 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 20264 v0.AuxInt = int8ToAuxInt(63) 20265 v0.AddArg(x) 20266 v.AddArg3(ptr, v0, mem) 20267 return true 20268 } 20269 break 20270 } 20271 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) 20272 // cond: z1==z2 20273 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) 
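// Illustrative note (not from the rules file): storing a bool such as
//	b = x>>31 != 0   // x an unsigned 32-bit value
// can produce (TESTL z z) with z = (SHRLconst [31] x); TEST of a value
// against itself is zero exactly when the value is zero, so the pattern is
// a test of bit 31 of x, and SETNEstore becomes SETBstore via BTLconst
// because BT leaves the selected bit in CF.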
20274 for { 20275 off := auxIntToInt32(v.AuxInt) 20276 sym := auxToSym(v.Aux) 20277 ptr := v_0 20278 if v_1.Op != OpAMD64TESTL { 20279 break 20280 } 20281 _ = v_1.Args[1] 20282 v_1_0 := v_1.Args[0] 20283 v_1_1 := v_1.Args[1] 20284 for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { 20285 z1 := v_1_0 20286 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { 20287 continue 20288 } 20289 x := z1.Args[0] 20290 z2 := v_1_1 20291 mem := v_2 20292 if !(z1 == z2) { 20293 continue 20294 } 20295 v.reset(OpAMD64SETBstore) 20296 v.AuxInt = int32ToAuxInt(off) 20297 v.Aux = symToAux(sym) 20298 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 20299 v0.AuxInt = int8ToAuxInt(31) 20300 v0.AddArg(x) 20301 v.AddArg3(ptr, v0, mem) 20302 return true 20303 } 20304 break 20305 } 20306 // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem) 20307 // result: (SETNEstore [off] {sym} ptr x mem) 20308 for { 20309 off := auxIntToInt32(v.AuxInt) 20310 sym := auxToSym(v.Aux) 20311 ptr := v_0 20312 if v_1.Op != OpAMD64InvertFlags { 20313 break 20314 } 20315 x := v_1.Args[0] 20316 mem := v_2 20317 v.reset(OpAMD64SETNEstore) 20318 v.AuxInt = int32ToAuxInt(off) 20319 v.Aux = symToAux(sym) 20320 v.AddArg3(ptr, x, mem) 20321 return true 20322 } 20323 // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem) 20324 // cond: is32Bit(int64(off1)+int64(off2)) 20325 // result: (SETNEstore [off1+off2] {sym} base val mem) 20326 for { 20327 off1 := auxIntToInt32(v.AuxInt) 20328 sym := auxToSym(v.Aux) 20329 if v_0.Op != OpAMD64ADDQconst { 20330 break 20331 } 20332 off2 := auxIntToInt32(v_0.AuxInt) 20333 base := v_0.Args[0] 20334 val := v_1 20335 mem := v_2 20336 if !(is32Bit(int64(off1) + int64(off2))) { 20337 break 20338 } 20339 v.reset(OpAMD64SETNEstore) 20340 v.AuxInt = int32ToAuxInt(off1 + off2) 20341 v.Aux = symToAux(sym) 20342 v.AddArg3(base, val, mem) 20343 return true 20344 } 20345 // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 20346 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) 20347 // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 20348 for { 20349 off1 := auxIntToInt32(v.AuxInt) 20350 sym1 := auxToSym(v.Aux) 20351 if v_0.Op != OpAMD64LEAQ { 20352 break 20353 } 20354 off2 := auxIntToInt32(v_0.AuxInt) 20355 sym2 := auxToSym(v_0.Aux) 20356 base := v_0.Args[0] 20357 val := v_1 20358 mem := v_2 20359 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { 20360 break 20361 } 20362 v.reset(OpAMD64SETNEstore) 20363 v.AuxInt = int32ToAuxInt(off1 + off2) 20364 v.Aux = symToAux(mergeSym(sym1, sym2)) 20365 v.AddArg3(base, val, mem) 20366 return true 20367 } 20368 // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem) 20369 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 20370 for { 20371 off := auxIntToInt32(v.AuxInt) 20372 sym := auxToSym(v.Aux) 20373 ptr := v_0 20374 if v_1.Op != OpAMD64FlagEQ { 20375 break 20376 } 20377 mem := v_2 20378 v.reset(OpAMD64MOVBstore) 20379 v.AuxInt = int32ToAuxInt(off) 20380 v.Aux = symToAux(sym) 20381 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 20382 v0.AuxInt = int32ToAuxInt(0) 20383 v.AddArg3(ptr, v0, mem) 20384 return true 20385 } 20386 // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) 20387 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 20388 for { 20389 off := auxIntToInt32(v.AuxInt) 20390 sym := auxToSym(v.Aux) 20391 ptr := v_0 20392 if v_1.Op != OpAMD64FlagLT_ULT { 20393 break 20394 } 20395 mem := v_2 20396 
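// Any statically known unequal flag state (either LT or GT variant) makes
// SETNE true, so this and the remaining flag rules store a constant 1,
// while the FlagEQ rule above stores 0.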
	// match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
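// Editorial note (not generated): the shift rules below drop arithmetic on
// the shift count. They are sound because AMD64 variable shifts consume only
// the low 5 bits (32-bit operands) or low 6 bits (64-bit operands) of the
// count register, so adding a multiple of the width, or masking with a
// constant that keeps those low bits, cannot change the result. A minimal
// sketch of the 32-bit identity, using only Go's own shift semantics (the
// helper name is hypothetical, for illustration only):
func exampleShiftCountMask32(x uint32, n uint) uint32 {
	// SHLL computes x << (count & 31); any rewrite of the count that
	// preserves count&31, e.g. dropping an ADD of a multiple of 32 or an
	// AND whose mask keeps the low 5 bits, preserves the result.
	return x << (n & 31)
}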
func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLL x (MOVQconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHLXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
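// Editorial note (not generated): the first SHLLconst rule below (and the
// matching SHLQconst rule) recognizes a logical right shift by 1 followed by
// a left shift by 1, which only clears bit 0, and folds the pair into a
// single AND with -2 (all ones except the low bit). A sketch of the identity
// for unsigned values (hypothetical helper, for illustration only):
func exampleClearLowBit(x uint32) uint32 {
	// (x >> 1) << 1 drops bit 0 and nothing else; that is x &^ 1,
	// i.e. x & 0xfffffffe, the ANDLconst [-2] of the rule below.
	return (x >> 1) << 1
}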
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLLconst [1] (SHRLconst [1] x))
	// result: (ANDLconst [-2] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(-2)
		v.AddArg(x)
		return true
	}
	// match: (SHLLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLLconst [d] (MOVLconst [c]))
	// result: (MOVLconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c << uint64(d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLQ x (MOVQconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHLXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLQconst [1] (SHRQconst [1] x))
	// result: (ANDQconst [-2] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(-2)
		v.AddArg(x)
		return true
	}
	// match: (SHLQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLQconst [d] (MOVQconst [c]))
	// result: (MOVQconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(c << uint64(d))
		return true
	}
	// match: (SHLQconst [d] (MOVLconst [c]))
	// result: (MOVQconst [int64(c) << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHLLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
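// Editorial note (not generated): SHRB shifts an 8-bit value, but the
// hardware still masks the count to 5 bits, so the rules below split on
// c&31: below 8 the shift survives as a SHRBconst, while at 8 or above every
// bit is shifted out and the value folds to the constant 0. A sketch of that
// case split (hypothetical helper, for illustration only):
func exampleShr8Fold(x uint8, c uint64) uint8 {
	if n := c & 31; n < 8 {
		return x >> n // stays a real shift, SHRBconst [n]
	}
	return 0 // c&31 >= 8: all bits shifted out, MOVLconst [0]
}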
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRL x (MOVQconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHRXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRLconst [1] (SHLLconst [1] x))
	// result: (ANDLconst [0x7fffffff] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (SHRLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRQ x (MOVQconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHRXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
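// Editorial note (not generated): the first SHRQconst rule below turns
// (x << 1) >> 1, which only clears the top bit, into BTRQconst [63], the
// single bit-test-and-reset instruction with bit index 63. A sketch of the
// identity for unsigned values (hypothetical helper, for illustration only):
func exampleClearTopBit(x uint64) uint64 {
	// Shifting left then logically right by 1 zeroes bit 63 and keeps
	// bits 0 through 62, exactly what BTR with index 63 computes.
	return (x << 1) >> 1
}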
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRQconst [1] (SHLQconst [1] x))
	// result: (BTRQconst [63] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(63)
		v.AddArg(x)
		return true
	}
	// match: (SHRQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
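// Editorial note (not generated): SUB takes its immediate only on the right,
// so the SUBL rules below canonicalize a constant left operand: c - x is
// rewritten as -(x - c), an identity that holds in two's complement even
// when the subtraction wraps. A sketch (hypothetical helper, for
// illustration only):
func exampleConstMinus(c, x int32) int32 {
	// c - x == -(x - c); the right-hand side lowers to
	// NEGL (SUBLconst [c] x), keeping c in the immediate field.
	return -(x - c)
}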
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBL x (MOVLconst [c]))
	// result: (SUBLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBLload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// result: (ADDLconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (SUBL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBQload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQborrow x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconstborrow x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	return false
}
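// Editorial note (not generated): the SUBQconst rules below prefer turning
// x - c into x + (-c) so later passes can fold additions, but that requires
// -c to fit the signed 32-bit immediate, hence the c != -(1<<31) guard:
// negating the most negative int32 overflows. A sketch of the guard
// (hypothetical helper, for illustration only):
func exampleCanNegateInt32(c int32) bool {
	// -math.MinInt32 would be 1<<31, which is not representable in an
	// int32, so that single value must keep the SUBQconst form.
	return c != math.MinInt32
}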
func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// result: (MOVQconst [d-int64(c)])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d - int64(c))
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(int64(-c)-int64(d))
	// result: (ADDQconst [-c-d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(-c) - int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c - d)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
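// Editorial note (not generated): the last SUBSDload rule above and the last
// SUBSSload rule below perform store-to-load forwarding across register
// classes: when the float operand being loaded was just written by an
// integer store of the same width to the same address, the value is taken
// directly from the integer register via MOVQi2f/MOVLi2f, a bit-preserving
// move from a general register to an XMM register, skipping memory entirely.
// The Go-level analogue of that reinterpretation is math.Float32frombits
// (hypothetical helper, for illustration only):
func exampleBitsToFloat(y uint32) float32 {
	// Same 32 bits, reinterpreted as a float: no conversion, no memory.
	return math.Float32frombits(y)
}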
func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTB (MOVLconst [c]) x)
	// result: (TESTBconst [int8(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTBconst)
			v.AuxInt = int8ToAuxInt(int8(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVBload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTBconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	return false
}
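// Editorial note (not generated): the TESTB rule above and the TESTL, TESTQ
// and TESTW rules below fold "TEST l, l" of a load l into a compare of the
// memory operand against zero (CMPconstload with value 0). The l.Uses == 2
// condition checks that the only uses of the load are the two TEST operands,
// and the rewrite is sound because TEST x, x and CMP x, $0 set the flags
// identically: x&x == x, x-0 == x, and both leave CF and OF cleared. A
// sketch of the flag-relevant identity (hypothetical helper):
func exampleTestSelfIsCmpZero(x int64) (zero, negative bool) {
	// TEST x, x examines x&x, CMP x, $0 examines x-0; both are just x.
	return x == 0, x < 0
}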
func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTL (MOVLconst [c]) x)
	// result: (TESTLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDLload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTL)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTLconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	return false
}
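// Editorial note (not generated): the first TESTQ rule below only fires when
// the constant passes is32Bit, because the TEST instruction's immediate form
// takes a 32-bit value sign-extended to 64 bits. A sketch of that check
// (hypothetical helper; the real predicate is this file's is32Bit):
func exampleFitsSignExtended32(c int64) bool {
	// c survives a round trip through int32 exactly when it can be
	// encoded as a sign-extended 32-bit immediate.
	return c == int64(int32(c))
}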
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDQload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTQ)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTQconst [-1] x)
	// cond: x.Op != OpAMD64MOVQconst
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVQconst) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTW (MOVLconst [c]) x)
	// result: (TESTWconst [int16(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTWconst)
			v.AuxInt = int16ToAuxInt(int16(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVWload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTWconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTCL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORL x (MOVLconst [c]))
	// result: (XORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64XORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (XORL x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSMSKL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSMSKL)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORLconst [1] (SETNE x))
	// result: (SETEQ x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// result: (SETNE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// result: (SETGE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// result: (SETL x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// result: (SETG x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// result: (SETLE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// result: (SETAE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// result: (SETB x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// result: (SETA x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// result: (SETBE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (XORLconst [d] x))
	// result: (XORLconst [c ^ d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c^d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (XORL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64XORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTCQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
	// result: (BTCQconst [int8(log64(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTCQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64XORQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (XORQ x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSMSKQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSMSKQ)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORQconst [c] (XORQconst [d] x))
	// result: (XORQconst [c ^ d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)^d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) ^ d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
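	// The two rules below fold constant address arithmetic into the
	// addressing mode: an ADDQconst base merges its offset, and a LEAQ
	// base additionally merges its symbol when canMergeSym allows it,
	// mirroring the XORLconstmodify rules above.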
	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (XORQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
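			// Forwarding the stored value is only valid when the store
			// writes the same address the load would read.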
			break
		}
		v.reset(OpAMD64XORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Addr {sym} base)
	// result: (LEAQ {sym} base)
	for {
		sym := auxToSym(v.Aux)
		base := v_0
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicAdd32 ptr val mem)
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64AddTupleFirst32)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg2(val, v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicAdd64 ptr val mem)
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64AddTupleFirst64)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg2(val, v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicAnd32 ptr val mem)
	// result: (ANDLlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ANDLlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicAnd8 ptr val mem)
	// result: (ANDBlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ANDBlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		ptr := v_0
		old := v_1
		new_ := v_2
		mem := v_3
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		ptr := v_0
		old := v_1
		new_ := v_2
		mem := v_3
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicExchange32 ptr val mem)
	// result: (XCHGL val ptr mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64XCHGL)
		v.AddArg3(val, ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicExchange64 ptr val mem)
	// result: (XCHGQ val ptr mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64XCHGQ)
		v.AddArg3(val, ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad32 ptr mem)
	// result: (MOVLatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad64 ptr mem)
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad8 ptr mem)
	// result: (MOVBatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoadPtr ptr mem)
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicOr32 ptr val mem)
	// result: (ORLlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ORLlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicOr8 ptr val mem)
	// result: (ORBlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ORBlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore32 ptr val mem)
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore64 ptr val mem)
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore8 ptr val mem)
	// result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStorePtrNoWB ptr val mem)
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen16 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen16 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpBitLen32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen32 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
		v1.AuxInt = int32ToAuxInt(1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v2.AddArg(x)
		v1.AddArg2(v2, v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen32 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpBitLen64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen64 <t> x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = int64ToAuxInt(-1)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4.AddArg(v2)
		v0.AddArg3(v1, v3, v4)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen64 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-64)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpBitLen8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen8 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen8 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpBswap16(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Bswap16 x)
	// result: (ROLWconst [8] x)
	for {
		x := v_0
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(8)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCeil(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ceil x)
	// result: (ROUNDSD [2] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(2)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCondSelect(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is32BitInt(t)
	// result: (CMOVLHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is16BitInt(t)
	// result: (CMOVWHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is16BitInt(t)
// result: (CMOVWGTF y x cond) 25026 for { 25027 t := v.Type 25028 x := v_0 25029 y := v_1 25030 if v_2.Op != OpAMD64SETGF { 25031 break 25032 } 25033 cond := v_2.Args[0] 25034 if !(is16BitInt(t)) { 25035 break 25036 } 25037 v.reset(OpAMD64CMOVWGTF) 25038 v.AddArg3(y, x, cond) 25039 return true 25040 } 25041 // match: (CondSelect <t> x y (SETGEF cond)) 25042 // cond: is16BitInt(t) 25043 // result: (CMOVWGEF y x cond) 25044 for { 25045 t := v.Type 25046 x := v_0 25047 y := v_1 25048 if v_2.Op != OpAMD64SETGEF { 25049 break 25050 } 25051 cond := v_2.Args[0] 25052 if !(is16BitInt(t)) { 25053 break 25054 } 25055 v.reset(OpAMD64CMOVWGEF) 25056 v.AddArg3(y, x, cond) 25057 return true 25058 } 25059 // match: (CondSelect <t> x y check) 25060 // cond: !check.Type.IsFlags() && check.Type.Size() == 1 25061 // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check)) 25062 for { 25063 t := v.Type 25064 x := v_0 25065 y := v_1 25066 check := v_2 25067 if !(!check.Type.IsFlags() && check.Type.Size() == 1) { 25068 break 25069 } 25070 v.reset(OpCondSelect) 25071 v.Type = t 25072 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) 25073 v0.AddArg(check) 25074 v.AddArg3(x, y, v0) 25075 return true 25076 } 25077 // match: (CondSelect <t> x y check) 25078 // cond: !check.Type.IsFlags() && check.Type.Size() == 2 25079 // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check)) 25080 for { 25081 t := v.Type 25082 x := v_0 25083 y := v_1 25084 check := v_2 25085 if !(!check.Type.IsFlags() && check.Type.Size() == 2) { 25086 break 25087 } 25088 v.reset(OpCondSelect) 25089 v.Type = t 25090 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) 25091 v0.AddArg(check) 25092 v.AddArg3(x, y, v0) 25093 return true 25094 } 25095 // match: (CondSelect <t> x y check) 25096 // cond: !check.Type.IsFlags() && check.Type.Size() == 4 25097 // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check)) 25098 for { 25099 t := v.Type 25100 x := v_0 25101 y := v_1 25102 check := v_2 25103 if !(!check.Type.IsFlags() && check.Type.Size() == 4) { 25104 break 25105 } 25106 v.reset(OpCondSelect) 25107 v.Type = t 25108 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) 25109 v0.AddArg(check) 25110 v.AddArg3(x, y, v0) 25111 return true 25112 } 25113 // match: (CondSelect <t> x y check) 25114 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) 25115 // result: (CMOVQNE y x (CMPQconst [0] check)) 25116 for { 25117 t := v.Type 25118 x := v_0 25119 y := v_1 25120 check := v_2 25121 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { 25122 break 25123 } 25124 v.reset(OpAMD64CMOVQNE) 25125 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 25126 v0.AuxInt = int32ToAuxInt(0) 25127 v0.AddArg(check) 25128 v.AddArg3(y, x, v0) 25129 return true 25130 } 25131 // match: (CondSelect <t> x y check) 25132 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) 25133 // result: (CMOVLNE y x (CMPQconst [0] check)) 25134 for { 25135 t := v.Type 25136 x := v_0 25137 y := v_1 25138 check := v_2 25139 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { 25140 break 25141 } 25142 v.reset(OpAMD64CMOVLNE) 25143 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 25144 v0.AuxInt = int32ToAuxInt(0) 25145 v0.AddArg(check) 25146 v.AddArg3(y, x, v0) 25147 return true 25148 } 25149 // match: (CondSelect <t> x y check) 25150 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) 25151 // result: (CMOVWNE y x (CMPQconst [0] 
check)) 25152 for { 25153 t := v.Type 25154 x := v_0 25155 y := v_1 25156 check := v_2 25157 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { 25158 break 25159 } 25160 v.reset(OpAMD64CMOVWNE) 25161 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 25162 v0.AuxInt = int32ToAuxInt(0) 25163 v0.AddArg(check) 25164 v.AddArg3(y, x, v0) 25165 return true 25166 } 25167 return false 25168 } 25169 func rewriteValueAMD64_OpConst16(v *Value) bool { 25170 // match: (Const16 [c]) 25171 // result: (MOVLconst [int32(c)]) 25172 for { 25173 c := auxIntToInt16(v.AuxInt) 25174 v.reset(OpAMD64MOVLconst) 25175 v.AuxInt = int32ToAuxInt(int32(c)) 25176 return true 25177 } 25178 } 25179 func rewriteValueAMD64_OpConst8(v *Value) bool { 25180 // match: (Const8 [c]) 25181 // result: (MOVLconst [int32(c)]) 25182 for { 25183 c := auxIntToInt8(v.AuxInt) 25184 v.reset(OpAMD64MOVLconst) 25185 v.AuxInt = int32ToAuxInt(int32(c)) 25186 return true 25187 } 25188 } 25189 func rewriteValueAMD64_OpConstBool(v *Value) bool { 25190 // match: (ConstBool [c]) 25191 // result: (MOVLconst [b2i32(c)]) 25192 for { 25193 c := auxIntToBool(v.AuxInt) 25194 v.reset(OpAMD64MOVLconst) 25195 v.AuxInt = int32ToAuxInt(b2i32(c)) 25196 return true 25197 } 25198 } 25199 func rewriteValueAMD64_OpConstNil(v *Value) bool { 25200 // match: (ConstNil ) 25201 // result: (MOVQconst [0]) 25202 for { 25203 v.reset(OpAMD64MOVQconst) 25204 v.AuxInt = int64ToAuxInt(0) 25205 return true 25206 } 25207 } 25208 func rewriteValueAMD64_OpCtz16(v *Value) bool { 25209 v_0 := v.Args[0] 25210 b := v.Block 25211 typ := &b.Func.Config.Types 25212 // match: (Ctz16 x) 25213 // result: (BSFL (ORLconst <typ.UInt32> [1<<16] x)) 25214 for { 25215 x := v_0 25216 v.reset(OpAMD64BSFL) 25217 v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) 25218 v0.AuxInt = int32ToAuxInt(1 << 16) 25219 v0.AddArg(x) 25220 v.AddArg(v0) 25221 return true 25222 } 25223 } 25224 func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { 25225 v_0 := v.Args[0] 25226 // match: (Ctz16NonZero x) 25227 // cond: buildcfg.GOAMD64 >= 3 25228 // result: (TZCNTL x) 25229 for { 25230 x := v_0 25231 if !(buildcfg.GOAMD64 >= 3) { 25232 break 25233 } 25234 v.reset(OpAMD64TZCNTL) 25235 v.AddArg(x) 25236 return true 25237 } 25238 // match: (Ctz16NonZero x) 25239 // cond: buildcfg.GOAMD64 < 3 25240 // result: (BSFL x) 25241 for { 25242 x := v_0 25243 if !(buildcfg.GOAMD64 < 3) { 25244 break 25245 } 25246 v.reset(OpAMD64BSFL) 25247 v.AddArg(x) 25248 return true 25249 } 25250 return false 25251 } 25252 func rewriteValueAMD64_OpCtz32(v *Value) bool { 25253 v_0 := v.Args[0] 25254 b := v.Block 25255 typ := &b.Func.Config.Types 25256 // match: (Ctz32 x) 25257 // cond: buildcfg.GOAMD64 >= 3 25258 // result: (TZCNTL x) 25259 for { 25260 x := v_0 25261 if !(buildcfg.GOAMD64 >= 3) { 25262 break 25263 } 25264 v.reset(OpAMD64TZCNTL) 25265 v.AddArg(x) 25266 return true 25267 } 25268 // match: (Ctz32 x) 25269 // cond: buildcfg.GOAMD64 < 3 25270 // result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x))) 25271 for { 25272 x := v_0 25273 if !(buildcfg.GOAMD64 < 3) { 25274 break 25275 } 25276 v.reset(OpSelect0) 25277 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 25278 v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) 25279 v1.AuxInt = int8ToAuxInt(32) 25280 v1.AddArg(x) 25281 v0.AddArg(v1) 25282 v.AddArg(v0) 25283 return true 25284 } 25285 return false 25286 } 25287 func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { 25288 v_0 := v.Args[0] 25289 // match: 
(Ctz32NonZero x) 25290 // cond: buildcfg.GOAMD64 >= 3 25291 // result: (TZCNTL x) 25292 for { 25293 x := v_0 25294 if !(buildcfg.GOAMD64 >= 3) { 25295 break 25296 } 25297 v.reset(OpAMD64TZCNTL) 25298 v.AddArg(x) 25299 return true 25300 } 25301 // match: (Ctz32NonZero x) 25302 // cond: buildcfg.GOAMD64 < 3 25303 // result: (BSFL x) 25304 for { 25305 x := v_0 25306 if !(buildcfg.GOAMD64 < 3) { 25307 break 25308 } 25309 v.reset(OpAMD64BSFL) 25310 v.AddArg(x) 25311 return true 25312 } 25313 return false 25314 } 25315 func rewriteValueAMD64_OpCtz64(v *Value) bool { 25316 v_0 := v.Args[0] 25317 b := v.Block 25318 typ := &b.Func.Config.Types 25319 // match: (Ctz64 x) 25320 // cond: buildcfg.GOAMD64 >= 3 25321 // result: (TZCNTQ x) 25322 for { 25323 x := v_0 25324 if !(buildcfg.GOAMD64 >= 3) { 25325 break 25326 } 25327 v.reset(OpAMD64TZCNTQ) 25328 v.AddArg(x) 25329 return true 25330 } 25331 // match: (Ctz64 <t> x) 25332 // cond: buildcfg.GOAMD64 < 3 25333 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x))) 25334 for { 25335 t := v.Type 25336 x := v_0 25337 if !(buildcfg.GOAMD64 < 3) { 25338 break 25339 } 25340 v.reset(OpAMD64CMOVQEQ) 25341 v0 := b.NewValue0(v.Pos, OpSelect0, t) 25342 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 25343 v1.AddArg(x) 25344 v0.AddArg(v1) 25345 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 25346 v2.AuxInt = int64ToAuxInt(64) 25347 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 25348 v3.AddArg(v1) 25349 v.AddArg3(v0, v2, v3) 25350 return true 25351 } 25352 return false 25353 } 25354 func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { 25355 v_0 := v.Args[0] 25356 b := v.Block 25357 typ := &b.Func.Config.Types 25358 // match: (Ctz64NonZero x) 25359 // cond: buildcfg.GOAMD64 >= 3 25360 // result: (TZCNTQ x) 25361 for { 25362 x := v_0 25363 if !(buildcfg.GOAMD64 >= 3) { 25364 break 25365 } 25366 v.reset(OpAMD64TZCNTQ) 25367 v.AddArg(x) 25368 return true 25369 } 25370 // match: (Ctz64NonZero x) 25371 // cond: buildcfg.GOAMD64 < 3 25372 // result: (Select0 (BSFQ x)) 25373 for { 25374 x := v_0 25375 if !(buildcfg.GOAMD64 < 3) { 25376 break 25377 } 25378 v.reset(OpSelect0) 25379 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 25380 v0.AddArg(x) 25381 v.AddArg(v0) 25382 return true 25383 } 25384 return false 25385 } 25386 func rewriteValueAMD64_OpCtz8(v *Value) bool { 25387 v_0 := v.Args[0] 25388 b := v.Block 25389 typ := &b.Func.Config.Types 25390 // match: (Ctz8 x) 25391 // result: (BSFL (ORLconst <typ.UInt32> [1<<8 ] x)) 25392 for { 25393 x := v_0 25394 v.reset(OpAMD64BSFL) 25395 v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) 25396 v0.AuxInt = int32ToAuxInt(1 << 8) 25397 v0.AddArg(x) 25398 v.AddArg(v0) 25399 return true 25400 } 25401 } 25402 func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { 25403 v_0 := v.Args[0] 25404 // match: (Ctz8NonZero x) 25405 // cond: buildcfg.GOAMD64 >= 3 25406 // result: (TZCNTL x) 25407 for { 25408 x := v_0 25409 if !(buildcfg.GOAMD64 >= 3) { 25410 break 25411 } 25412 v.reset(OpAMD64TZCNTL) 25413 v.AddArg(x) 25414 return true 25415 } 25416 // match: (Ctz8NonZero x) 25417 // cond: buildcfg.GOAMD64 < 3 25418 // result: (BSFL x) 25419 for { 25420 x := v_0 25421 if !(buildcfg.GOAMD64 < 3) { 25422 break 25423 } 25424 v.reset(OpAMD64BSFL) 25425 v.AddArg(x) 25426 return true 25427 } 25428 return false 25429 } 25430 func rewriteValueAMD64_OpDiv16(v *Value) bool { 25431 v_1 := v.Args[1] 25432 v_0 := v.Args[0] 25433 b 
:= v.Block 25434 typ := &b.Func.Config.Types 25435 // match: (Div16 [a] x y) 25436 // result: (Select0 (DIVW [a] x y)) 25437 for { 25438 a := auxIntToBool(v.AuxInt) 25439 x := v_0 25440 y := v_1 25441 v.reset(OpSelect0) 25442 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 25443 v0.AuxInt = boolToAuxInt(a) 25444 v0.AddArg2(x, y) 25445 v.AddArg(v0) 25446 return true 25447 } 25448 } 25449 func rewriteValueAMD64_OpDiv16u(v *Value) bool { 25450 v_1 := v.Args[1] 25451 v_0 := v.Args[0] 25452 b := v.Block 25453 typ := &b.Func.Config.Types 25454 // match: (Div16u x y) 25455 // result: (Select0 (DIVWU x y)) 25456 for { 25457 x := v_0 25458 y := v_1 25459 v.reset(OpSelect0) 25460 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 25461 v0.AddArg2(x, y) 25462 v.AddArg(v0) 25463 return true 25464 } 25465 } 25466 func rewriteValueAMD64_OpDiv32(v *Value) bool { 25467 v_1 := v.Args[1] 25468 v_0 := v.Args[0] 25469 b := v.Block 25470 typ := &b.Func.Config.Types 25471 // match: (Div32 [a] x y) 25472 // result: (Select0 (DIVL [a] x y)) 25473 for { 25474 a := auxIntToBool(v.AuxInt) 25475 x := v_0 25476 y := v_1 25477 v.reset(OpSelect0) 25478 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 25479 v0.AuxInt = boolToAuxInt(a) 25480 v0.AddArg2(x, y) 25481 v.AddArg(v0) 25482 return true 25483 } 25484 } 25485 func rewriteValueAMD64_OpDiv32u(v *Value) bool { 25486 v_1 := v.Args[1] 25487 v_0 := v.Args[0] 25488 b := v.Block 25489 typ := &b.Func.Config.Types 25490 // match: (Div32u x y) 25491 // result: (Select0 (DIVLU x y)) 25492 for { 25493 x := v_0 25494 y := v_1 25495 v.reset(OpSelect0) 25496 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 25497 v0.AddArg2(x, y) 25498 v.AddArg(v0) 25499 return true 25500 } 25501 } 25502 func rewriteValueAMD64_OpDiv64(v *Value) bool { 25503 v_1 := v.Args[1] 25504 v_0 := v.Args[0] 25505 b := v.Block 25506 typ := &b.Func.Config.Types 25507 // match: (Div64 [a] x y) 25508 // result: (Select0 (DIVQ [a] x y)) 25509 for { 25510 a := auxIntToBool(v.AuxInt) 25511 x := v_0 25512 y := v_1 25513 v.reset(OpSelect0) 25514 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 25515 v0.AuxInt = boolToAuxInt(a) 25516 v0.AddArg2(x, y) 25517 v.AddArg(v0) 25518 return true 25519 } 25520 } 25521 func rewriteValueAMD64_OpDiv64u(v *Value) bool { 25522 v_1 := v.Args[1] 25523 v_0 := v.Args[0] 25524 b := v.Block 25525 typ := &b.Func.Config.Types 25526 // match: (Div64u x y) 25527 // result: (Select0 (DIVQU x y)) 25528 for { 25529 x := v_0 25530 y := v_1 25531 v.reset(OpSelect0) 25532 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 25533 v0.AddArg2(x, y) 25534 v.AddArg(v0) 25535 return true 25536 } 25537 } 25538 func rewriteValueAMD64_OpDiv8(v *Value) bool { 25539 v_1 := v.Args[1] 25540 v_0 := v.Args[0] 25541 b := v.Block 25542 typ := &b.Func.Config.Types 25543 // match: (Div8 x y) 25544 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 25545 for { 25546 x := v_0 25547 y := v_1 25548 v.reset(OpSelect0) 25549 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 25550 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 25551 v1.AddArg(x) 25552 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 25553 v2.AddArg(y) 25554 v0.AddArg2(v1, v2) 25555 v.AddArg(v0) 25556 return true 25557 } 25558 } 25559 func rewriteValueAMD64_OpDiv8u(v *Value) bool { 25560 v_1 := v.Args[1] 25561 v_0 := v.Args[0] 25562 b := v.Block 25563 
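	// Note (editorial annotation, not generated output): there is no 8-bit
	// divide lowering. Div8 above and Div8u below widen both operands to 16
	// bits and reuse the DIVW/DIVWU tuple ops, as do Mod8/Mod8u further down.
	// A minimal Go-level sketch of what the signed Div8 rule computes (the
	// helper name is illustrative):
	//
	//	func div8(x, y int8) int8 {
	//		// Select0 of (DIVW (SignExt8to16 x) (SignExt8to16 y))
	//		return int8(int16(x) / int16(y))
	//	}
	//
	// Widening keeps Go semantics: the quotient of two in-range int8 values
	// cannot overflow a 16-bit divide (the extreme is -128/-1 = 128), and
	// truncating back to 8 bits yields the required -128 wraparound result.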
typ := &b.Func.Config.Types 25564 // match: (Div8u x y) 25565 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 25566 for { 25567 x := v_0 25568 y := v_1 25569 v.reset(OpSelect0) 25570 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 25571 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 25572 v1.AddArg(x) 25573 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 25574 v2.AddArg(y) 25575 v0.AddArg2(v1, v2) 25576 v.AddArg(v0) 25577 return true 25578 } 25579 } 25580 func rewriteValueAMD64_OpEq16(v *Value) bool { 25581 v_1 := v.Args[1] 25582 v_0 := v.Args[0] 25583 b := v.Block 25584 // match: (Eq16 x y) 25585 // result: (SETEQ (CMPW x y)) 25586 for { 25587 x := v_0 25588 y := v_1 25589 v.reset(OpAMD64SETEQ) 25590 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 25591 v0.AddArg2(x, y) 25592 v.AddArg(v0) 25593 return true 25594 } 25595 } 25596 func rewriteValueAMD64_OpEq32(v *Value) bool { 25597 v_1 := v.Args[1] 25598 v_0 := v.Args[0] 25599 b := v.Block 25600 // match: (Eq32 x y) 25601 // result: (SETEQ (CMPL x y)) 25602 for { 25603 x := v_0 25604 y := v_1 25605 v.reset(OpAMD64SETEQ) 25606 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 25607 v0.AddArg2(x, y) 25608 v.AddArg(v0) 25609 return true 25610 } 25611 } 25612 func rewriteValueAMD64_OpEq32F(v *Value) bool { 25613 v_1 := v.Args[1] 25614 v_0 := v.Args[0] 25615 b := v.Block 25616 // match: (Eq32F x y) 25617 // result: (SETEQF (UCOMISS x y)) 25618 for { 25619 x := v_0 25620 y := v_1 25621 v.reset(OpAMD64SETEQF) 25622 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 25623 v0.AddArg2(x, y) 25624 v.AddArg(v0) 25625 return true 25626 } 25627 } 25628 func rewriteValueAMD64_OpEq64(v *Value) bool { 25629 v_1 := v.Args[1] 25630 v_0 := v.Args[0] 25631 b := v.Block 25632 // match: (Eq64 x y) 25633 // result: (SETEQ (CMPQ x y)) 25634 for { 25635 x := v_0 25636 y := v_1 25637 v.reset(OpAMD64SETEQ) 25638 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 25639 v0.AddArg2(x, y) 25640 v.AddArg(v0) 25641 return true 25642 } 25643 } 25644 func rewriteValueAMD64_OpEq64F(v *Value) bool { 25645 v_1 := v.Args[1] 25646 v_0 := v.Args[0] 25647 b := v.Block 25648 // match: (Eq64F x y) 25649 // result: (SETEQF (UCOMISD x y)) 25650 for { 25651 x := v_0 25652 y := v_1 25653 v.reset(OpAMD64SETEQF) 25654 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 25655 v0.AddArg2(x, y) 25656 v.AddArg(v0) 25657 return true 25658 } 25659 } 25660 func rewriteValueAMD64_OpEq8(v *Value) bool { 25661 v_1 := v.Args[1] 25662 v_0 := v.Args[0] 25663 b := v.Block 25664 // match: (Eq8 x y) 25665 // result: (SETEQ (CMPB x y)) 25666 for { 25667 x := v_0 25668 y := v_1 25669 v.reset(OpAMD64SETEQ) 25670 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 25671 v0.AddArg2(x, y) 25672 v.AddArg(v0) 25673 return true 25674 } 25675 } 25676 func rewriteValueAMD64_OpEqB(v *Value) bool { 25677 v_1 := v.Args[1] 25678 v_0 := v.Args[0] 25679 b := v.Block 25680 // match: (EqB x y) 25681 // result: (SETEQ (CMPB x y)) 25682 for { 25683 x := v_0 25684 y := v_1 25685 v.reset(OpAMD64SETEQ) 25686 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 25687 v0.AddArg2(x, y) 25688 v.AddArg(v0) 25689 return true 25690 } 25691 } 25692 func rewriteValueAMD64_OpEqPtr(v *Value) bool { 25693 v_1 := v.Args[1] 25694 v_0 := v.Args[0] 25695 b := v.Block 25696 // match: (EqPtr x y) 25697 // result: (SETEQ (CMPQ x y)) 25698 for { 25699 x := v_0 25700 y := v_1 25701 v.reset(OpAMD64SETEQ) 25702 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, 
types.TypeFlags) 25703 v0.AddArg2(x, y) 25704 v.AddArg(v0) 25705 return true 25706 } 25707 } 25708 func rewriteValueAMD64_OpFMA(v *Value) bool { 25709 v_2 := v.Args[2] 25710 v_1 := v.Args[1] 25711 v_0 := v.Args[0] 25712 // match: (FMA x y z) 25713 // result: (VFMADD231SD z x y) 25714 for { 25715 x := v_0 25716 y := v_1 25717 z := v_2 25718 v.reset(OpAMD64VFMADD231SD) 25719 v.AddArg3(z, x, y) 25720 return true 25721 } 25722 } 25723 func rewriteValueAMD64_OpFloor(v *Value) bool { 25724 v_0 := v.Args[0] 25725 // match: (Floor x) 25726 // result: (ROUNDSD [1] x) 25727 for { 25728 x := v_0 25729 v.reset(OpAMD64ROUNDSD) 25730 v.AuxInt = int8ToAuxInt(1) 25731 v.AddArg(x) 25732 return true 25733 } 25734 } 25735 func rewriteValueAMD64_OpGetG(v *Value) bool { 25736 v_0 := v.Args[0] 25737 // match: (GetG mem) 25738 // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal 25739 // result: (LoweredGetG mem) 25740 for { 25741 mem := v_0 25742 if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { 25743 break 25744 } 25745 v.reset(OpAMD64LoweredGetG) 25746 v.AddArg(mem) 25747 return true 25748 } 25749 return false 25750 } 25751 func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { 25752 b := v.Block 25753 typ := &b.Func.Config.Types 25754 // match: (HasCPUFeature {s}) 25755 // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) 25756 for { 25757 s := auxToSym(v.Aux) 25758 v.reset(OpAMD64SETNE) 25759 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 25760 v0.AuxInt = int32ToAuxInt(0) 25761 v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) 25762 v1.Aux = symToAux(s) 25763 v0.AddArg(v1) 25764 v.AddArg(v0) 25765 return true 25766 } 25767 } 25768 func rewriteValueAMD64_OpIsInBounds(v *Value) bool { 25769 v_1 := v.Args[1] 25770 v_0 := v.Args[0] 25771 b := v.Block 25772 // match: (IsInBounds idx len) 25773 // result: (SETB (CMPQ idx len)) 25774 for { 25775 idx := v_0 25776 len := v_1 25777 v.reset(OpAMD64SETB) 25778 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 25779 v0.AddArg2(idx, len) 25780 v.AddArg(v0) 25781 return true 25782 } 25783 } 25784 func rewriteValueAMD64_OpIsNonNil(v *Value) bool { 25785 v_0 := v.Args[0] 25786 b := v.Block 25787 // match: (IsNonNil p) 25788 // result: (SETNE (TESTQ p p)) 25789 for { 25790 p := v_0 25791 v.reset(OpAMD64SETNE) 25792 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) 25793 v0.AddArg2(p, p) 25794 v.AddArg(v0) 25795 return true 25796 } 25797 } 25798 func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { 25799 v_1 := v.Args[1] 25800 v_0 := v.Args[0] 25801 b := v.Block 25802 // match: (IsSliceInBounds idx len) 25803 // result: (SETBE (CMPQ idx len)) 25804 for { 25805 idx := v_0 25806 len := v_1 25807 v.reset(OpAMD64SETBE) 25808 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 25809 v0.AddArg2(idx, len) 25810 v.AddArg(v0) 25811 return true 25812 } 25813 } 25814 func rewriteValueAMD64_OpLeq16(v *Value) bool { 25815 v_1 := v.Args[1] 25816 v_0 := v.Args[0] 25817 b := v.Block 25818 // match: (Leq16 x y) 25819 // result: (SETLE (CMPW x y)) 25820 for { 25821 x := v_0 25822 y := v_1 25823 v.reset(OpAMD64SETLE) 25824 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 25825 v0.AddArg2(x, y) 25826 v.AddArg(v0) 25827 return true 25828 } 25829 } 25830 func rewriteValueAMD64_OpLeq16U(v *Value) bool { 25831 v_1 := v.Args[1] 25832 v_0 := v.Args[0] 25833 b := v.Block 25834 // match: (Leq16U x y) 25835 // result: (SETBE (CMPW x y)) 25836 for { 25837 x := v_0 25838 y := v_1 25839 v.reset(OpAMD64SETBE) 25840 v0 := 
b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 25841 v0.AddArg2(x, y) 25842 v.AddArg(v0) 25843 return true 25844 } 25845 } 25846 func rewriteValueAMD64_OpLeq32(v *Value) bool { 25847 v_1 := v.Args[1] 25848 v_0 := v.Args[0] 25849 b := v.Block 25850 // match: (Leq32 x y) 25851 // result: (SETLE (CMPL x y)) 25852 for { 25853 x := v_0 25854 y := v_1 25855 v.reset(OpAMD64SETLE) 25856 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 25857 v0.AddArg2(x, y) 25858 v.AddArg(v0) 25859 return true 25860 } 25861 } 25862 func rewriteValueAMD64_OpLeq32F(v *Value) bool { 25863 v_1 := v.Args[1] 25864 v_0 := v.Args[0] 25865 b := v.Block 25866 // match: (Leq32F x y) 25867 // result: (SETGEF (UCOMISS y x)) 25868 for { 25869 x := v_0 25870 y := v_1 25871 v.reset(OpAMD64SETGEF) 25872 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 25873 v0.AddArg2(y, x) 25874 v.AddArg(v0) 25875 return true 25876 } 25877 } 25878 func rewriteValueAMD64_OpLeq32U(v *Value) bool { 25879 v_1 := v.Args[1] 25880 v_0 := v.Args[0] 25881 b := v.Block 25882 // match: (Leq32U x y) 25883 // result: (SETBE (CMPL x y)) 25884 for { 25885 x := v_0 25886 y := v_1 25887 v.reset(OpAMD64SETBE) 25888 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 25889 v0.AddArg2(x, y) 25890 v.AddArg(v0) 25891 return true 25892 } 25893 } 25894 func rewriteValueAMD64_OpLeq64(v *Value) bool { 25895 v_1 := v.Args[1] 25896 v_0 := v.Args[0] 25897 b := v.Block 25898 // match: (Leq64 x y) 25899 // result: (SETLE (CMPQ x y)) 25900 for { 25901 x := v_0 25902 y := v_1 25903 v.reset(OpAMD64SETLE) 25904 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 25905 v0.AddArg2(x, y) 25906 v.AddArg(v0) 25907 return true 25908 } 25909 } 25910 func rewriteValueAMD64_OpLeq64F(v *Value) bool { 25911 v_1 := v.Args[1] 25912 v_0 := v.Args[0] 25913 b := v.Block 25914 // match: (Leq64F x y) 25915 // result: (SETGEF (UCOMISD y x)) 25916 for { 25917 x := v_0 25918 y := v_1 25919 v.reset(OpAMD64SETGEF) 25920 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 25921 v0.AddArg2(y, x) 25922 v.AddArg(v0) 25923 return true 25924 } 25925 } 25926 func rewriteValueAMD64_OpLeq64U(v *Value) bool { 25927 v_1 := v.Args[1] 25928 v_0 := v.Args[0] 25929 b := v.Block 25930 // match: (Leq64U x y) 25931 // result: (SETBE (CMPQ x y)) 25932 for { 25933 x := v_0 25934 y := v_1 25935 v.reset(OpAMD64SETBE) 25936 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 25937 v0.AddArg2(x, y) 25938 v.AddArg(v0) 25939 return true 25940 } 25941 } 25942 func rewriteValueAMD64_OpLeq8(v *Value) bool { 25943 v_1 := v.Args[1] 25944 v_0 := v.Args[0] 25945 b := v.Block 25946 // match: (Leq8 x y) 25947 // result: (SETLE (CMPB x y)) 25948 for { 25949 x := v_0 25950 y := v_1 25951 v.reset(OpAMD64SETLE) 25952 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 25953 v0.AddArg2(x, y) 25954 v.AddArg(v0) 25955 return true 25956 } 25957 } 25958 func rewriteValueAMD64_OpLeq8U(v *Value) bool { 25959 v_1 := v.Args[1] 25960 v_0 := v.Args[0] 25961 b := v.Block 25962 // match: (Leq8U x y) 25963 // result: (SETBE (CMPB x y)) 25964 for { 25965 x := v_0 25966 y := v_1 25967 v.reset(OpAMD64SETBE) 25968 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 25969 v0.AddArg2(x, y) 25970 v.AddArg(v0) 25971 return true 25972 } 25973 } 25974 func rewriteValueAMD64_OpLess16(v *Value) bool { 25975 v_1 := v.Args[1] 25976 v_0 := v.Args[0] 25977 b := v.Block 25978 // match: (Less16 x y) 25979 // result: (SETL (CMPW x y)) 25980 for { 25981 x := v_0 25982 y := v_1 25983 v.reset(OpAMD64SETL) 25984 v0 := b.NewValue0(v.Pos, 
OpAMD64CMPW, types.TypeFlags) 25985 v0.AddArg2(x, y) 25986 v.AddArg(v0) 25987 return true 25988 } 25989 } 25990 func rewriteValueAMD64_OpLess16U(v *Value) bool { 25991 v_1 := v.Args[1] 25992 v_0 := v.Args[0] 25993 b := v.Block 25994 // match: (Less16U x y) 25995 // result: (SETB (CMPW x y)) 25996 for { 25997 x := v_0 25998 y := v_1 25999 v.reset(OpAMD64SETB) 26000 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 26001 v0.AddArg2(x, y) 26002 v.AddArg(v0) 26003 return true 26004 } 26005 } 26006 func rewriteValueAMD64_OpLess32(v *Value) bool { 26007 v_1 := v.Args[1] 26008 v_0 := v.Args[0] 26009 b := v.Block 26010 // match: (Less32 x y) 26011 // result: (SETL (CMPL x y)) 26012 for { 26013 x := v_0 26014 y := v_1 26015 v.reset(OpAMD64SETL) 26016 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 26017 v0.AddArg2(x, y) 26018 v.AddArg(v0) 26019 return true 26020 } 26021 } 26022 func rewriteValueAMD64_OpLess32F(v *Value) bool { 26023 v_1 := v.Args[1] 26024 v_0 := v.Args[0] 26025 b := v.Block 26026 // match: (Less32F x y) 26027 // result: (SETGF (UCOMISS y x)) 26028 for { 26029 x := v_0 26030 y := v_1 26031 v.reset(OpAMD64SETGF) 26032 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 26033 v0.AddArg2(y, x) 26034 v.AddArg(v0) 26035 return true 26036 } 26037 } 26038 func rewriteValueAMD64_OpLess32U(v *Value) bool { 26039 v_1 := v.Args[1] 26040 v_0 := v.Args[0] 26041 b := v.Block 26042 // match: (Less32U x y) 26043 // result: (SETB (CMPL x y)) 26044 for { 26045 x := v_0 26046 y := v_1 26047 v.reset(OpAMD64SETB) 26048 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 26049 v0.AddArg2(x, y) 26050 v.AddArg(v0) 26051 return true 26052 } 26053 } 26054 func rewriteValueAMD64_OpLess64(v *Value) bool { 26055 v_1 := v.Args[1] 26056 v_0 := v.Args[0] 26057 b := v.Block 26058 // match: (Less64 x y) 26059 // result: (SETL (CMPQ x y)) 26060 for { 26061 x := v_0 26062 y := v_1 26063 v.reset(OpAMD64SETL) 26064 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 26065 v0.AddArg2(x, y) 26066 v.AddArg(v0) 26067 return true 26068 } 26069 } 26070 func rewriteValueAMD64_OpLess64F(v *Value) bool { 26071 v_1 := v.Args[1] 26072 v_0 := v.Args[0] 26073 b := v.Block 26074 // match: (Less64F x y) 26075 // result: (SETGF (UCOMISD y x)) 26076 for { 26077 x := v_0 26078 y := v_1 26079 v.reset(OpAMD64SETGF) 26080 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 26081 v0.AddArg2(y, x) 26082 v.AddArg(v0) 26083 return true 26084 } 26085 } 26086 func rewriteValueAMD64_OpLess64U(v *Value) bool { 26087 v_1 := v.Args[1] 26088 v_0 := v.Args[0] 26089 b := v.Block 26090 // match: (Less64U x y) 26091 // result: (SETB (CMPQ x y)) 26092 for { 26093 x := v_0 26094 y := v_1 26095 v.reset(OpAMD64SETB) 26096 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 26097 v0.AddArg2(x, y) 26098 v.AddArg(v0) 26099 return true 26100 } 26101 } 26102 func rewriteValueAMD64_OpLess8(v *Value) bool { 26103 v_1 := v.Args[1] 26104 v_0 := v.Args[0] 26105 b := v.Block 26106 // match: (Less8 x y) 26107 // result: (SETL (CMPB x y)) 26108 for { 26109 x := v_0 26110 y := v_1 26111 v.reset(OpAMD64SETL) 26112 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 26113 v0.AddArg2(x, y) 26114 v.AddArg(v0) 26115 return true 26116 } 26117 } 26118 func rewriteValueAMD64_OpLess8U(v *Value) bool { 26119 v_1 := v.Args[1] 26120 v_0 := v.Args[0] 26121 b := v.Block 26122 // match: (Less8U x y) 26123 // result: (SETB (CMPB x y)) 26124 for { 26125 x := v_0 26126 y := v_1 26127 v.reset(OpAMD64SETB) 26128 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, 
types.TypeFlags) 26129 v0.AddArg2(x, y) 26130 v.AddArg(v0) 26131 return true 26132 } 26133 } 26134 func rewriteValueAMD64_OpLoad(v *Value) bool { 26135 v_1 := v.Args[1] 26136 v_0 := v.Args[0] 26137 // match: (Load <t> ptr mem) 26138 // cond: (is64BitInt(t) || isPtr(t)) 26139 // result: (MOVQload ptr mem) 26140 for { 26141 t := v.Type 26142 ptr := v_0 26143 mem := v_1 26144 if !(is64BitInt(t) || isPtr(t)) { 26145 break 26146 } 26147 v.reset(OpAMD64MOVQload) 26148 v.AddArg2(ptr, mem) 26149 return true 26150 } 26151 // match: (Load <t> ptr mem) 26152 // cond: is32BitInt(t) 26153 // result: (MOVLload ptr mem) 26154 for { 26155 t := v.Type 26156 ptr := v_0 26157 mem := v_1 26158 if !(is32BitInt(t)) { 26159 break 26160 } 26161 v.reset(OpAMD64MOVLload) 26162 v.AddArg2(ptr, mem) 26163 return true 26164 } 26165 // match: (Load <t> ptr mem) 26166 // cond: is16BitInt(t) 26167 // result: (MOVWload ptr mem) 26168 for { 26169 t := v.Type 26170 ptr := v_0 26171 mem := v_1 26172 if !(is16BitInt(t)) { 26173 break 26174 } 26175 v.reset(OpAMD64MOVWload) 26176 v.AddArg2(ptr, mem) 26177 return true 26178 } 26179 // match: (Load <t> ptr mem) 26180 // cond: (t.IsBoolean() || is8BitInt(t)) 26181 // result: (MOVBload ptr mem) 26182 for { 26183 t := v.Type 26184 ptr := v_0 26185 mem := v_1 26186 if !(t.IsBoolean() || is8BitInt(t)) { 26187 break 26188 } 26189 v.reset(OpAMD64MOVBload) 26190 v.AddArg2(ptr, mem) 26191 return true 26192 } 26193 // match: (Load <t> ptr mem) 26194 // cond: is32BitFloat(t) 26195 // result: (MOVSSload ptr mem) 26196 for { 26197 t := v.Type 26198 ptr := v_0 26199 mem := v_1 26200 if !(is32BitFloat(t)) { 26201 break 26202 } 26203 v.reset(OpAMD64MOVSSload) 26204 v.AddArg2(ptr, mem) 26205 return true 26206 } 26207 // match: (Load <t> ptr mem) 26208 // cond: is64BitFloat(t) 26209 // result: (MOVSDload ptr mem) 26210 for { 26211 t := v.Type 26212 ptr := v_0 26213 mem := v_1 26214 if !(is64BitFloat(t)) { 26215 break 26216 } 26217 v.reset(OpAMD64MOVSDload) 26218 v.AddArg2(ptr, mem) 26219 return true 26220 } 26221 return false 26222 } 26223 func rewriteValueAMD64_OpLocalAddr(v *Value) bool { 26224 v_1 := v.Args[1] 26225 v_0 := v.Args[0] 26226 b := v.Block 26227 typ := &b.Func.Config.Types 26228 // match: (LocalAddr <t> {sym} base mem) 26229 // cond: t.Elem().HasPointers() 26230 // result: (LEAQ {sym} (SPanchored base mem)) 26231 for { 26232 t := v.Type 26233 sym := auxToSym(v.Aux) 26234 base := v_0 26235 mem := v_1 26236 if !(t.Elem().HasPointers()) { 26237 break 26238 } 26239 v.reset(OpAMD64LEAQ) 26240 v.Aux = symToAux(sym) 26241 v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) 26242 v0.AddArg2(base, mem) 26243 v.AddArg(v0) 26244 return true 26245 } 26246 // match: (LocalAddr <t> {sym} base _) 26247 // cond: !t.Elem().HasPointers() 26248 // result: (LEAQ {sym} base) 26249 for { 26250 t := v.Type 26251 sym := auxToSym(v.Aux) 26252 base := v_0 26253 if !(!t.Elem().HasPointers()) { 26254 break 26255 } 26256 v.reset(OpAMD64LEAQ) 26257 v.Aux = symToAux(sym) 26258 v.AddArg(base) 26259 return true 26260 } 26261 return false 26262 } 26263 func rewriteValueAMD64_OpLsh16x16(v *Value) bool { 26264 v_1 := v.Args[1] 26265 v_0 := v.Args[0] 26266 b := v.Block 26267 // match: (Lsh16x16 <t> x y) 26268 // cond: !shiftIsBounded(v) 26269 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 26270 for { 26271 t := v.Type 26272 x := v_0 26273 y := v_1 26274 if !(!shiftIsBounded(v)) { 26275 break 26276 } 26277 v.reset(OpAMD64ANDL) 26278 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26279 v0.AddArg2(x, y) 
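	// Note (editorial annotation, not generated output): the SBBLcarrymask
	// built next implements Go's shift semantics, where a count >= the operand
	// width must produce 0, without a branch. CMPWconst y [32] sets the carry
	// flag iff uint16(y) < 32, SBBLcarrymask turns that carry into an all-ones
	// or all-zero mask, and the final ANDL keeps or clears the SHLL result.
	// The limit is 32 rather than 16 because SHLL shifts a 32-bit register and
	// takes its count mod 32: counts 16..31 already leave the low 16 bits
	// zero, so only counts >= 32 need the mask. Go-level sketch (helper name
	// illustrative):
	//
	//	func lsh16x16(x, y uint16) uint16 {
	//		s := uint32(x) << (y & 31) // SHLL: hardware count is taken mod 32
	//		var mask uint32
	//		if y < 32 { // CMPWconst y [32]: carry set iff y < 32
	//			mask = ^uint32(0) // SBBLcarrymask: all ones on carry
	//		}
	//		return uint16(s & mask) // ANDL: zero when y >= 32
	//	}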
26280 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26281 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 26282 v2.AuxInt = int16ToAuxInt(32) 26283 v2.AddArg(y) 26284 v1.AddArg(v2) 26285 v.AddArg2(v0, v1) 26286 return true 26287 } 26288 // match: (Lsh16x16 x y) 26289 // cond: shiftIsBounded(v) 26290 // result: (SHLL x y) 26291 for { 26292 x := v_0 26293 y := v_1 26294 if !(shiftIsBounded(v)) { 26295 break 26296 } 26297 v.reset(OpAMD64SHLL) 26298 v.AddArg2(x, y) 26299 return true 26300 } 26301 return false 26302 } 26303 func rewriteValueAMD64_OpLsh16x32(v *Value) bool { 26304 v_1 := v.Args[1] 26305 v_0 := v.Args[0] 26306 b := v.Block 26307 // match: (Lsh16x32 <t> x y) 26308 // cond: !shiftIsBounded(v) 26309 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 26310 for { 26311 t := v.Type 26312 x := v_0 26313 y := v_1 26314 if !(!shiftIsBounded(v)) { 26315 break 26316 } 26317 v.reset(OpAMD64ANDL) 26318 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26319 v0.AddArg2(x, y) 26320 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26321 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 26322 v2.AuxInt = int32ToAuxInt(32) 26323 v2.AddArg(y) 26324 v1.AddArg(v2) 26325 v.AddArg2(v0, v1) 26326 return true 26327 } 26328 // match: (Lsh16x32 x y) 26329 // cond: shiftIsBounded(v) 26330 // result: (SHLL x y) 26331 for { 26332 x := v_0 26333 y := v_1 26334 if !(shiftIsBounded(v)) { 26335 break 26336 } 26337 v.reset(OpAMD64SHLL) 26338 v.AddArg2(x, y) 26339 return true 26340 } 26341 return false 26342 } 26343 func rewriteValueAMD64_OpLsh16x64(v *Value) bool { 26344 v_1 := v.Args[1] 26345 v_0 := v.Args[0] 26346 b := v.Block 26347 // match: (Lsh16x64 <t> x y) 26348 // cond: !shiftIsBounded(v) 26349 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 26350 for { 26351 t := v.Type 26352 x := v_0 26353 y := v_1 26354 if !(!shiftIsBounded(v)) { 26355 break 26356 } 26357 v.reset(OpAMD64ANDL) 26358 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26359 v0.AddArg2(x, y) 26360 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26361 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 26362 v2.AuxInt = int32ToAuxInt(32) 26363 v2.AddArg(y) 26364 v1.AddArg(v2) 26365 v.AddArg2(v0, v1) 26366 return true 26367 } 26368 // match: (Lsh16x64 x y) 26369 // cond: shiftIsBounded(v) 26370 // result: (SHLL x y) 26371 for { 26372 x := v_0 26373 y := v_1 26374 if !(shiftIsBounded(v)) { 26375 break 26376 } 26377 v.reset(OpAMD64SHLL) 26378 v.AddArg2(x, y) 26379 return true 26380 } 26381 return false 26382 } 26383 func rewriteValueAMD64_OpLsh16x8(v *Value) bool { 26384 v_1 := v.Args[1] 26385 v_0 := v.Args[0] 26386 b := v.Block 26387 // match: (Lsh16x8 <t> x y) 26388 // cond: !shiftIsBounded(v) 26389 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 26390 for { 26391 t := v.Type 26392 x := v_0 26393 y := v_1 26394 if !(!shiftIsBounded(v)) { 26395 break 26396 } 26397 v.reset(OpAMD64ANDL) 26398 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26399 v0.AddArg2(x, y) 26400 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26401 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 26402 v2.AuxInt = int8ToAuxInt(32) 26403 v2.AddArg(y) 26404 v1.AddArg(v2) 26405 v.AddArg2(v0, v1) 26406 return true 26407 } 26408 // match: (Lsh16x8 x y) 26409 // cond: shiftIsBounded(v) 26410 // result: (SHLL x y) 26411 for { 26412 x := v_0 26413 y := v_1 26414 if !(shiftIsBounded(v)) { 26415 break 26416 } 26417 v.reset(OpAMD64SHLL) 26418 v.AddArg2(x, y) 26419 return true 26420 } 26421 
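	// Note (editorial annotation, not generated output): every Lsh*x* op in
	// this file comes as the same rule pair seen above: a masked form for the
	// general case, and a bare shift when the prove pass has marked the count
	// as bounded (shiftIsBounded(v)). For example, a shift such as
	//
	//	x << (y & 15) // count provably < 16, the operand width
	//
	// needs no mask and lowers to a single SHLL.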
return false 26422 } 26423 func rewriteValueAMD64_OpLsh32x16(v *Value) bool { 26424 v_1 := v.Args[1] 26425 v_0 := v.Args[0] 26426 b := v.Block 26427 // match: (Lsh32x16 <t> x y) 26428 // cond: !shiftIsBounded(v) 26429 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 26430 for { 26431 t := v.Type 26432 x := v_0 26433 y := v_1 26434 if !(!shiftIsBounded(v)) { 26435 break 26436 } 26437 v.reset(OpAMD64ANDL) 26438 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26439 v0.AddArg2(x, y) 26440 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26441 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 26442 v2.AuxInt = int16ToAuxInt(32) 26443 v2.AddArg(y) 26444 v1.AddArg(v2) 26445 v.AddArg2(v0, v1) 26446 return true 26447 } 26448 // match: (Lsh32x16 x y) 26449 // cond: shiftIsBounded(v) 26450 // result: (SHLL x y) 26451 for { 26452 x := v_0 26453 y := v_1 26454 if !(shiftIsBounded(v)) { 26455 break 26456 } 26457 v.reset(OpAMD64SHLL) 26458 v.AddArg2(x, y) 26459 return true 26460 } 26461 return false 26462 } 26463 func rewriteValueAMD64_OpLsh32x32(v *Value) bool { 26464 v_1 := v.Args[1] 26465 v_0 := v.Args[0] 26466 b := v.Block 26467 // match: (Lsh32x32 <t> x y) 26468 // cond: !shiftIsBounded(v) 26469 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 26470 for { 26471 t := v.Type 26472 x := v_0 26473 y := v_1 26474 if !(!shiftIsBounded(v)) { 26475 break 26476 } 26477 v.reset(OpAMD64ANDL) 26478 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26479 v0.AddArg2(x, y) 26480 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26481 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 26482 v2.AuxInt = int32ToAuxInt(32) 26483 v2.AddArg(y) 26484 v1.AddArg(v2) 26485 v.AddArg2(v0, v1) 26486 return true 26487 } 26488 // match: (Lsh32x32 x y) 26489 // cond: shiftIsBounded(v) 26490 // result: (SHLL x y) 26491 for { 26492 x := v_0 26493 y := v_1 26494 if !(shiftIsBounded(v)) { 26495 break 26496 } 26497 v.reset(OpAMD64SHLL) 26498 v.AddArg2(x, y) 26499 return true 26500 } 26501 return false 26502 } 26503 func rewriteValueAMD64_OpLsh32x64(v *Value) bool { 26504 v_1 := v.Args[1] 26505 v_0 := v.Args[0] 26506 b := v.Block 26507 // match: (Lsh32x64 <t> x y) 26508 // cond: !shiftIsBounded(v) 26509 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 26510 for { 26511 t := v.Type 26512 x := v_0 26513 y := v_1 26514 if !(!shiftIsBounded(v)) { 26515 break 26516 } 26517 v.reset(OpAMD64ANDL) 26518 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26519 v0.AddArg2(x, y) 26520 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26521 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 26522 v2.AuxInt = int32ToAuxInt(32) 26523 v2.AddArg(y) 26524 v1.AddArg(v2) 26525 v.AddArg2(v0, v1) 26526 return true 26527 } 26528 // match: (Lsh32x64 x y) 26529 // cond: shiftIsBounded(v) 26530 // result: (SHLL x y) 26531 for { 26532 x := v_0 26533 y := v_1 26534 if !(shiftIsBounded(v)) { 26535 break 26536 } 26537 v.reset(OpAMD64SHLL) 26538 v.AddArg2(x, y) 26539 return true 26540 } 26541 return false 26542 } 26543 func rewriteValueAMD64_OpLsh32x8(v *Value) bool { 26544 v_1 := v.Args[1] 26545 v_0 := v.Args[0] 26546 b := v.Block 26547 // match: (Lsh32x8 <t> x y) 26548 // cond: !shiftIsBounded(v) 26549 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 26550 for { 26551 t := v.Type 26552 x := v_0 26553 y := v_1 26554 if !(!shiftIsBounded(v)) { 26555 break 26556 } 26557 v.reset(OpAMD64ANDL) 26558 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26559 v0.AddArg2(x, y) 26560 v1 := 
b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26561 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 26562 v2.AuxInt = int8ToAuxInt(32) 26563 v2.AddArg(y) 26564 v1.AddArg(v2) 26565 v.AddArg2(v0, v1) 26566 return true 26567 } 26568 // match: (Lsh32x8 x y) 26569 // cond: shiftIsBounded(v) 26570 // result: (SHLL x y) 26571 for { 26572 x := v_0 26573 y := v_1 26574 if !(shiftIsBounded(v)) { 26575 break 26576 } 26577 v.reset(OpAMD64SHLL) 26578 v.AddArg2(x, y) 26579 return true 26580 } 26581 return false 26582 } 26583 func rewriteValueAMD64_OpLsh64x16(v *Value) bool { 26584 v_1 := v.Args[1] 26585 v_0 := v.Args[0] 26586 b := v.Block 26587 // match: (Lsh64x16 <t> x y) 26588 // cond: !shiftIsBounded(v) 26589 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 26590 for { 26591 t := v.Type 26592 x := v_0 26593 y := v_1 26594 if !(!shiftIsBounded(v)) { 26595 break 26596 } 26597 v.reset(OpAMD64ANDQ) 26598 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 26599 v0.AddArg2(x, y) 26600 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 26601 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 26602 v2.AuxInt = int16ToAuxInt(64) 26603 v2.AddArg(y) 26604 v1.AddArg(v2) 26605 v.AddArg2(v0, v1) 26606 return true 26607 } 26608 // match: (Lsh64x16 x y) 26609 // cond: shiftIsBounded(v) 26610 // result: (SHLQ x y) 26611 for { 26612 x := v_0 26613 y := v_1 26614 if !(shiftIsBounded(v)) { 26615 break 26616 } 26617 v.reset(OpAMD64SHLQ) 26618 v.AddArg2(x, y) 26619 return true 26620 } 26621 return false 26622 } 26623 func rewriteValueAMD64_OpLsh64x32(v *Value) bool { 26624 v_1 := v.Args[1] 26625 v_0 := v.Args[0] 26626 b := v.Block 26627 // match: (Lsh64x32 <t> x y) 26628 // cond: !shiftIsBounded(v) 26629 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 26630 for { 26631 t := v.Type 26632 x := v_0 26633 y := v_1 26634 if !(!shiftIsBounded(v)) { 26635 break 26636 } 26637 v.reset(OpAMD64ANDQ) 26638 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 26639 v0.AddArg2(x, y) 26640 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 26641 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 26642 v2.AuxInt = int32ToAuxInt(64) 26643 v2.AddArg(y) 26644 v1.AddArg(v2) 26645 v.AddArg2(v0, v1) 26646 return true 26647 } 26648 // match: (Lsh64x32 x y) 26649 // cond: shiftIsBounded(v) 26650 // result: (SHLQ x y) 26651 for { 26652 x := v_0 26653 y := v_1 26654 if !(shiftIsBounded(v)) { 26655 break 26656 } 26657 v.reset(OpAMD64SHLQ) 26658 v.AddArg2(x, y) 26659 return true 26660 } 26661 return false 26662 } 26663 func rewriteValueAMD64_OpLsh64x64(v *Value) bool { 26664 v_1 := v.Args[1] 26665 v_0 := v.Args[0] 26666 b := v.Block 26667 // match: (Lsh64x64 <t> x y) 26668 // cond: !shiftIsBounded(v) 26669 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 26670 for { 26671 t := v.Type 26672 x := v_0 26673 y := v_1 26674 if !(!shiftIsBounded(v)) { 26675 break 26676 } 26677 v.reset(OpAMD64ANDQ) 26678 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 26679 v0.AddArg2(x, y) 26680 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 26681 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 26682 v2.AuxInt = int32ToAuxInt(64) 26683 v2.AddArg(y) 26684 v1.AddArg(v2) 26685 v.AddArg2(v0, v1) 26686 return true 26687 } 26688 // match: (Lsh64x64 x y) 26689 // cond: shiftIsBounded(v) 26690 // result: (SHLQ x y) 26691 for { 26692 x := v_0 26693 y := v_1 26694 if !(shiftIsBounded(v)) { 26695 break 26696 } 26697 v.reset(OpAMD64SHLQ) 26698 v.AddArg2(x, y) 26699 return true 26700 } 26701 return false 
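	// Note (editorial annotation, not generated output): the 64-bit shifts use
	// the Q-sized ops; SHLQ takes its count mod 64, so the guard compares the
	// count against 64 instead of 32. Go-level sketch of the rule above
	// (helper name illustrative):
	//
	//	func lsh64x64(x, y uint64) uint64 {
	//		s := x << (y & 63) // SHLQ: count taken mod 64
	//		var mask uint64
	//		if y < 64 { // CMPQconst y [64]: carry set iff y < 64
	//			mask = ^uint64(0) // SBBQcarrymask: all ones on carry
	//		}
	//		return s & mask // ANDQ: zero when y >= 64
	//	}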
26702 } 26703 func rewriteValueAMD64_OpLsh64x8(v *Value) bool { 26704 v_1 := v.Args[1] 26705 v_0 := v.Args[0] 26706 b := v.Block 26707 // match: (Lsh64x8 <t> x y) 26708 // cond: !shiftIsBounded(v) 26709 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 26710 for { 26711 t := v.Type 26712 x := v_0 26713 y := v_1 26714 if !(!shiftIsBounded(v)) { 26715 break 26716 } 26717 v.reset(OpAMD64ANDQ) 26718 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 26719 v0.AddArg2(x, y) 26720 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 26721 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 26722 v2.AuxInt = int8ToAuxInt(64) 26723 v2.AddArg(y) 26724 v1.AddArg(v2) 26725 v.AddArg2(v0, v1) 26726 return true 26727 } 26728 // match: (Lsh64x8 x y) 26729 // cond: shiftIsBounded(v) 26730 // result: (SHLQ x y) 26731 for { 26732 x := v_0 26733 y := v_1 26734 if !(shiftIsBounded(v)) { 26735 break 26736 } 26737 v.reset(OpAMD64SHLQ) 26738 v.AddArg2(x, y) 26739 return true 26740 } 26741 return false 26742 } 26743 func rewriteValueAMD64_OpLsh8x16(v *Value) bool { 26744 v_1 := v.Args[1] 26745 v_0 := v.Args[0] 26746 b := v.Block 26747 // match: (Lsh8x16 <t> x y) 26748 // cond: !shiftIsBounded(v) 26749 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 26750 for { 26751 t := v.Type 26752 x := v_0 26753 y := v_1 26754 if !(!shiftIsBounded(v)) { 26755 break 26756 } 26757 v.reset(OpAMD64ANDL) 26758 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26759 v0.AddArg2(x, y) 26760 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26761 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 26762 v2.AuxInt = int16ToAuxInt(32) 26763 v2.AddArg(y) 26764 v1.AddArg(v2) 26765 v.AddArg2(v0, v1) 26766 return true 26767 } 26768 // match: (Lsh8x16 x y) 26769 // cond: shiftIsBounded(v) 26770 // result: (SHLL x y) 26771 for { 26772 x := v_0 26773 y := v_1 26774 if !(shiftIsBounded(v)) { 26775 break 26776 } 26777 v.reset(OpAMD64SHLL) 26778 v.AddArg2(x, y) 26779 return true 26780 } 26781 return false 26782 } 26783 func rewriteValueAMD64_OpLsh8x32(v *Value) bool { 26784 v_1 := v.Args[1] 26785 v_0 := v.Args[0] 26786 b := v.Block 26787 // match: (Lsh8x32 <t> x y) 26788 // cond: !shiftIsBounded(v) 26789 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 26790 for { 26791 t := v.Type 26792 x := v_0 26793 y := v_1 26794 if !(!shiftIsBounded(v)) { 26795 break 26796 } 26797 v.reset(OpAMD64ANDL) 26798 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26799 v0.AddArg2(x, y) 26800 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26801 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 26802 v2.AuxInt = int32ToAuxInt(32) 26803 v2.AddArg(y) 26804 v1.AddArg(v2) 26805 v.AddArg2(v0, v1) 26806 return true 26807 } 26808 // match: (Lsh8x32 x y) 26809 // cond: shiftIsBounded(v) 26810 // result: (SHLL x y) 26811 for { 26812 x := v_0 26813 y := v_1 26814 if !(shiftIsBounded(v)) { 26815 break 26816 } 26817 v.reset(OpAMD64SHLL) 26818 v.AddArg2(x, y) 26819 return true 26820 } 26821 return false 26822 } 26823 func rewriteValueAMD64_OpLsh8x64(v *Value) bool { 26824 v_1 := v.Args[1] 26825 v_0 := v.Args[0] 26826 b := v.Block 26827 // match: (Lsh8x64 <t> x y) 26828 // cond: !shiftIsBounded(v) 26829 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 26830 for { 26831 t := v.Type 26832 x := v_0 26833 y := v_1 26834 if !(!shiftIsBounded(v)) { 26835 break 26836 } 26837 v.reset(OpAMD64ANDL) 26838 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26839 v0.AddArg2(x, y) 26840 v1 := b.NewValue0(v.Pos, 
OpAMD64SBBLcarrymask, t) 26841 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 26842 v2.AuxInt = int32ToAuxInt(32) 26843 v2.AddArg(y) 26844 v1.AddArg(v2) 26845 v.AddArg2(v0, v1) 26846 return true 26847 } 26848 // match: (Lsh8x64 x y) 26849 // cond: shiftIsBounded(v) 26850 // result: (SHLL x y) 26851 for { 26852 x := v_0 26853 y := v_1 26854 if !(shiftIsBounded(v)) { 26855 break 26856 } 26857 v.reset(OpAMD64SHLL) 26858 v.AddArg2(x, y) 26859 return true 26860 } 26861 return false 26862 } 26863 func rewriteValueAMD64_OpLsh8x8(v *Value) bool { 26864 v_1 := v.Args[1] 26865 v_0 := v.Args[0] 26866 b := v.Block 26867 // match: (Lsh8x8 <t> x y) 26868 // cond: !shiftIsBounded(v) 26869 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 26870 for { 26871 t := v.Type 26872 x := v_0 26873 y := v_1 26874 if !(!shiftIsBounded(v)) { 26875 break 26876 } 26877 v.reset(OpAMD64ANDL) 26878 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 26879 v0.AddArg2(x, y) 26880 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 26881 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 26882 v2.AuxInt = int8ToAuxInt(32) 26883 v2.AddArg(y) 26884 v1.AddArg(v2) 26885 v.AddArg2(v0, v1) 26886 return true 26887 } 26888 // match: (Lsh8x8 x y) 26889 // cond: shiftIsBounded(v) 26890 // result: (SHLL x y) 26891 for { 26892 x := v_0 26893 y := v_1 26894 if !(shiftIsBounded(v)) { 26895 break 26896 } 26897 v.reset(OpAMD64SHLL) 26898 v.AddArg2(x, y) 26899 return true 26900 } 26901 return false 26902 } 26903 func rewriteValueAMD64_OpMax32F(v *Value) bool { 26904 v_1 := v.Args[1] 26905 v_0 := v.Args[0] 26906 b := v.Block 26907 // match: (Max32F <t> x y) 26908 // result: (Neg32F <t> (Min32F <t> (Neg32F <t> x) (Neg32F <t> y))) 26909 for { 26910 t := v.Type 26911 x := v_0 26912 y := v_1 26913 v.reset(OpNeg32F) 26914 v.Type = t 26915 v0 := b.NewValue0(v.Pos, OpMin32F, t) 26916 v1 := b.NewValue0(v.Pos, OpNeg32F, t) 26917 v1.AddArg(x) 26918 v2 := b.NewValue0(v.Pos, OpNeg32F, t) 26919 v2.AddArg(y) 26920 v0.AddArg2(v1, v2) 26921 v.AddArg(v0) 26922 return true 26923 } 26924 } 26925 func rewriteValueAMD64_OpMax64F(v *Value) bool { 26926 v_1 := v.Args[1] 26927 v_0 := v.Args[0] 26928 b := v.Block 26929 // match: (Max64F <t> x y) 26930 // result: (Neg64F <t> (Min64F <t> (Neg64F <t> x) (Neg64F <t> y))) 26931 for { 26932 t := v.Type 26933 x := v_0 26934 y := v_1 26935 v.reset(OpNeg64F) 26936 v.Type = t 26937 v0 := b.NewValue0(v.Pos, OpMin64F, t) 26938 v1 := b.NewValue0(v.Pos, OpNeg64F, t) 26939 v1.AddArg(x) 26940 v2 := b.NewValue0(v.Pos, OpNeg64F, t) 26941 v2.AddArg(y) 26942 v0.AddArg2(v1, v2) 26943 v.AddArg(v0) 26944 return true 26945 } 26946 } 26947 func rewriteValueAMD64_OpMin32F(v *Value) bool { 26948 v_1 := v.Args[1] 26949 v_0 := v.Args[0] 26950 b := v.Block 26951 // match: (Min32F <t> x y) 26952 // result: (POR (MINSS <t> (MINSS <t> x y) x) (MINSS <t> x y)) 26953 for { 26954 t := v.Type 26955 x := v_0 26956 y := v_1 26957 v.reset(OpAMD64POR) 26958 v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) 26959 v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) 26960 v1.AddArg2(x, y) 26961 v0.AddArg2(v1, x) 26962 v.AddArg2(v0, v1) 26963 return true 26964 } 26965 } 26966 func rewriteValueAMD64_OpMin64F(v *Value) bool { 26967 v_1 := v.Args[1] 26968 v_0 := v.Args[0] 26969 b := v.Block 26970 // match: (Min64F <t> x y) 26971 // result: (POR (MINSD <t> (MINSD <t> x y) x) (MINSD <t> x y)) 26972 for { 26973 t := v.Type 26974 x := v_0 26975 y := v_1 26976 v.reset(OpAMD64POR) 26977 v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) 26978 v1 := 
b.NewValue0(v.Pos, OpAMD64MINSD, t) 26979 v1.AddArg2(x, y) 26980 v0.AddArg2(v1, x) 26981 v.AddArg2(v0, v1) 26982 return true 26983 } 26984 } 26985 func rewriteValueAMD64_OpMod16(v *Value) bool { 26986 v_1 := v.Args[1] 26987 v_0 := v.Args[0] 26988 b := v.Block 26989 typ := &b.Func.Config.Types 26990 // match: (Mod16 [a] x y) 26991 // result: (Select1 (DIVW [a] x y)) 26992 for { 26993 a := auxIntToBool(v.AuxInt) 26994 x := v_0 26995 y := v_1 26996 v.reset(OpSelect1) 26997 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 26998 v0.AuxInt = boolToAuxInt(a) 26999 v0.AddArg2(x, y) 27000 v.AddArg(v0) 27001 return true 27002 } 27003 } 27004 func rewriteValueAMD64_OpMod16u(v *Value) bool { 27005 v_1 := v.Args[1] 27006 v_0 := v.Args[0] 27007 b := v.Block 27008 typ := &b.Func.Config.Types 27009 // match: (Mod16u x y) 27010 // result: (Select1 (DIVWU x y)) 27011 for { 27012 x := v_0 27013 y := v_1 27014 v.reset(OpSelect1) 27015 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 27016 v0.AddArg2(x, y) 27017 v.AddArg(v0) 27018 return true 27019 } 27020 } 27021 func rewriteValueAMD64_OpMod32(v *Value) bool { 27022 v_1 := v.Args[1] 27023 v_0 := v.Args[0] 27024 b := v.Block 27025 typ := &b.Func.Config.Types 27026 // match: (Mod32 [a] x y) 27027 // result: (Select1 (DIVL [a] x y)) 27028 for { 27029 a := auxIntToBool(v.AuxInt) 27030 x := v_0 27031 y := v_1 27032 v.reset(OpSelect1) 27033 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 27034 v0.AuxInt = boolToAuxInt(a) 27035 v0.AddArg2(x, y) 27036 v.AddArg(v0) 27037 return true 27038 } 27039 } 27040 func rewriteValueAMD64_OpMod32u(v *Value) bool { 27041 v_1 := v.Args[1] 27042 v_0 := v.Args[0] 27043 b := v.Block 27044 typ := &b.Func.Config.Types 27045 // match: (Mod32u x y) 27046 // result: (Select1 (DIVLU x y)) 27047 for { 27048 x := v_0 27049 y := v_1 27050 v.reset(OpSelect1) 27051 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 27052 v0.AddArg2(x, y) 27053 v.AddArg(v0) 27054 return true 27055 } 27056 } 27057 func rewriteValueAMD64_OpMod64(v *Value) bool { 27058 v_1 := v.Args[1] 27059 v_0 := v.Args[0] 27060 b := v.Block 27061 typ := &b.Func.Config.Types 27062 // match: (Mod64 [a] x y) 27063 // result: (Select1 (DIVQ [a] x y)) 27064 for { 27065 a := auxIntToBool(v.AuxInt) 27066 x := v_0 27067 y := v_1 27068 v.reset(OpSelect1) 27069 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 27070 v0.AuxInt = boolToAuxInt(a) 27071 v0.AddArg2(x, y) 27072 v.AddArg(v0) 27073 return true 27074 } 27075 } 27076 func rewriteValueAMD64_OpMod64u(v *Value) bool { 27077 v_1 := v.Args[1] 27078 v_0 := v.Args[0] 27079 b := v.Block 27080 typ := &b.Func.Config.Types 27081 // match: (Mod64u x y) 27082 // result: (Select1 (DIVQU x y)) 27083 for { 27084 x := v_0 27085 y := v_1 27086 v.reset(OpSelect1) 27087 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 27088 v0.AddArg2(x, y) 27089 v.AddArg(v0) 27090 return true 27091 } 27092 } 27093 func rewriteValueAMD64_OpMod8(v *Value) bool { 27094 v_1 := v.Args[1] 27095 v_0 := v.Args[0] 27096 b := v.Block 27097 typ := &b.Func.Config.Types 27098 // match: (Mod8 x y) 27099 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 27100 for { 27101 x := v_0 27102 y := v_1 27103 v.reset(OpSelect1) 27104 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 27105 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 27106 v1.AddArg(x) 27107 v2 := b.NewValue0(v.Pos, 
OpSignExt8to16, typ.Int16) 27108 v2.AddArg(y) 27109 v0.AddArg2(v1, v2) 27110 v.AddArg(v0) 27111 return true 27112 } 27113 } 27114 func rewriteValueAMD64_OpMod8u(v *Value) bool { 27115 v_1 := v.Args[1] 27116 v_0 := v.Args[0] 27117 b := v.Block 27118 typ := &b.Func.Config.Types 27119 // match: (Mod8u x y) 27120 // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 27121 for { 27122 x := v_0 27123 y := v_1 27124 v.reset(OpSelect1) 27125 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 27126 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 27127 v1.AddArg(x) 27128 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 27129 v2.AddArg(y) 27130 v0.AddArg2(v1, v2) 27131 v.AddArg(v0) 27132 return true 27133 } 27134 } 27135 func rewriteValueAMD64_OpMove(v *Value) bool { 27136 v_2 := v.Args[2] 27137 v_1 := v.Args[1] 27138 v_0 := v.Args[0] 27139 b := v.Block 27140 config := b.Func.Config 27141 typ := &b.Func.Config.Types 27142 // match: (Move [0] _ _ mem) 27143 // result: mem 27144 for { 27145 if auxIntToInt64(v.AuxInt) != 0 { 27146 break 27147 } 27148 mem := v_2 27149 v.copyOf(mem) 27150 return true 27151 } 27152 // match: (Move [1] dst src mem) 27153 // result: (MOVBstore dst (MOVBload src mem) mem) 27154 for { 27155 if auxIntToInt64(v.AuxInt) != 1 { 27156 break 27157 } 27158 dst := v_0 27159 src := v_1 27160 mem := v_2 27161 v.reset(OpAMD64MOVBstore) 27162 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 27163 v0.AddArg2(src, mem) 27164 v.AddArg3(dst, v0, mem) 27165 return true 27166 } 27167 // match: (Move [2] dst src mem) 27168 // result: (MOVWstore dst (MOVWload src mem) mem) 27169 for { 27170 if auxIntToInt64(v.AuxInt) != 2 { 27171 break 27172 } 27173 dst := v_0 27174 src := v_1 27175 mem := v_2 27176 v.reset(OpAMD64MOVWstore) 27177 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27178 v0.AddArg2(src, mem) 27179 v.AddArg3(dst, v0, mem) 27180 return true 27181 } 27182 // match: (Move [4] dst src mem) 27183 // result: (MOVLstore dst (MOVLload src mem) mem) 27184 for { 27185 if auxIntToInt64(v.AuxInt) != 4 { 27186 break 27187 } 27188 dst := v_0 27189 src := v_1 27190 mem := v_2 27191 v.reset(OpAMD64MOVLstore) 27192 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27193 v0.AddArg2(src, mem) 27194 v.AddArg3(dst, v0, mem) 27195 return true 27196 } 27197 // match: (Move [8] dst src mem) 27198 // result: (MOVQstore dst (MOVQload src mem) mem) 27199 for { 27200 if auxIntToInt64(v.AuxInt) != 8 { 27201 break 27202 } 27203 dst := v_0 27204 src := v_1 27205 mem := v_2 27206 v.reset(OpAMD64MOVQstore) 27207 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 27208 v0.AddArg2(src, mem) 27209 v.AddArg3(dst, v0, mem) 27210 return true 27211 } 27212 // match: (Move [16] dst src mem) 27213 // cond: config.useSSE 27214 // result: (MOVOstore dst (MOVOload src mem) mem) 27215 for { 27216 if auxIntToInt64(v.AuxInt) != 16 { 27217 break 27218 } 27219 dst := v_0 27220 src := v_1 27221 mem := v_2 27222 if !(config.useSSE) { 27223 break 27224 } 27225 v.reset(OpAMD64MOVOstore) 27226 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 27227 v0.AddArg2(src, mem) 27228 v.AddArg3(dst, v0, mem) 27229 return true 27230 } 27231 // match: (Move [16] dst src mem) 27232 // cond: !config.useSSE 27233 // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 27234 for { 27235 if auxIntToInt64(v.AuxInt) != 16 { 27236 break 27237 } 27238 dst := v_0 27239 src := v_1 27240 mem := v_2 27241 if !(!config.useSSE) { 27242 break 27243 } 27244 
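	// Note (editorial annotation, not generated output): without SSE, a
	// 16-byte Move is split into two 8-byte load/store pairs, the second pair
	// at offset 8, sequenced through the memory argument. Go-level sketch of
	// the copy being emitted (illustrative; assumes dst and src do not
	// overlap, and uses package unsafe):
	//
	//	func move16(dst, src unsafe.Pointer) {
	//		lo := *(*uint64)(src)                // MOVQload src
	//		hi := *(*uint64)(unsafe.Add(src, 8)) // MOVQload [8] src
	//		*(*uint64)(dst) = lo                 // MOVQstore dst
	//		*(*uint64)(unsafe.Add(dst, 8)) = hi  // MOVQstore [8] dst
	//	}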
func rewriteValueAMD64_OpMove(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Move [0] _ _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_2
		v.copyOf(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVQstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: config.useSSE
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: !config.useSSE
	// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [32] dst src mem)
	// result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [48] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [64] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(32)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(32)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(32)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [3] dst src mem)
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(2)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(2)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(3)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(3)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [9] dst src mem)
	// result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 9 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [10] dst src mem)
	// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 10 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [11] dst src mem)
	// result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 11 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(7)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(7)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [12] dst src mem)
	// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 13 && s <= 15
	// result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s >= 13 && s <= 15) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(int32(s - 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(int32(s - 8))
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg2(src, mem)
		v2.AddArg3(dst, v3, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg2(src, mem)
		v2.AddArg3(dst, v3, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = int32ToAuxInt(8)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg2(src, mem)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg2(src, mem)
		v4.AddArg3(dst, v5, mem)
		v2.AddArg3(dst, v3, v4)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
	// result: (DUFFCOPY [s] dst src mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(s / 8)
		v.AddArg4(dst, src, v0, mem)
		return true
	}
	return false
}
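// rewriteValueAMD64_OpMove above selects a copy strategy by size: plain
// load/store pairs for 1, 2, 4, and 8 bytes; overlapping loads for the odd
// sizes (Move [7], for instance, issues two 4-byte copies that overlap by one
// byte); 16-byte SSE moves or quadword pairs; recursive 16-byte chunking;
// DUFFCOPY for multiples of 16 up to 16*64 bytes; and REPMOVSQ beyond that.
// A hedged, approximate sketch of the dispatch order (moveStrategyModel and
// its result strings are illustrative only, not compiler helpers):
func moveStrategyModel(s int64, noDuffDevice bool) string {
	switch {
	case s <= 16:
		return "direct loads/stores, overlapping when s is not a power of two"
	case s%16 != 0:
		return "peel s%16 leading bytes, then recurse on the 16-byte multiple"
	case s <= 16*64 && !noDuffDevice:
		return "recursive chunking up to 64 bytes, then DUFFCOPY"
	default:
		return "REPMOVSQ with quadword count s/8"
	}
}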
func rewriteValueAMD64_OpNeg32F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg32F x)
	// result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
		v.AddArg2(x, v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg64F x)
	// result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
		v.AddArg2(x, v0)
		return true
	}
}
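// Neg32F and Neg64F above negate by XORing in a constant whose only set bit
// is the sign bit: math.Copysign(0, -1) is -0.0 (bit pattern
// 0x8000000000000000 for float64). Unlike a 0-x subtraction, this flips the
// sign of every input, including NaNs and zeros (0 - 0 would yield +0, not
// -0). A hedged, illustrative model (negF64Model is not part of the
// generated rules):
func negF64Model(x float64) float64 {
	// PXOR with the sign-bit mask, expressed on the scalar bit pattern
	return math.Float64frombits(math.Float64bits(x) ^ math.Float64bits(math.Copysign(0, -1)))
}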
func rewriteValueAMD64_OpNeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq16 x y)
	// result: (SETNE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32 x y)
	// result: (SETNE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32F x y)
	// result: (SETNEF (UCOMISS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64 x y)
	// result: (SETNE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64F x y)
	// result: (SETNEF (UCOMISD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq8 x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqB x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqPtr x y)
	// result: (SETNE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNot(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Not x)
	// result: (XORLconst [1] x)
	for {
		x := v_0
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
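// The Not rule above depends on the invariant that SSA booleans are exactly 0
// or 1, so logical negation is the single-bit flip XORLconst [1] rather than
// a full bitwise complement. A hedged, illustrative model (notModel is not
// part of the generated rules):
func notModel(b uint8) uint8 {
	// valid only for b in {0, 1}, matching the SSA boolean representation
	return b ^ 1
}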
func rewriteValueAMD64_OpOffPtr(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (OffPtr [off] ptr)
	// cond: is32Bit(off)
	// result: (ADDQconst [int32(off)] ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		if !(is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(int32(off))
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(off)
		v.AddArg2(v0, ptr)
		return true
	}
}
func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicBoundsA [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsA)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicBoundsB [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsB)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicBoundsC [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsC)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpPopCount16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount16 x)
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount8 x)
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
	v_0 := v.Args[0]
	// match: (RoundToEven x)
	// result: (ROUNDSD [0] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
}
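// Two details worth noting in the rules above. RoundToEven is ROUNDSD with
// immediate 0, following the SSE4.1 rounding-mode encoding (0 = nearest even,
// 1 = floor, 2 = ceil, 3 = truncate; Trunc later in this file uses 3). And
// OffPtr only folds into an ADDQconst immediate when the offset survives a
// round trip through int32; a hedged sketch of that check (is32BitModel is
// illustrative, not the compiler's is32Bit helper):
func is32BitModel(off int64) bool {
	// true when off is representable as a sign-extended 32-bit immediate
	return off == int64(int32(off))
}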
func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
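// The unsigned Rsh16Ux* rules above implement Go's shift semantics (a shift
// by >= the operand width yields 0) without a branch: SHRW computes the raw
// shift, the CMPWconst/CMPLconst/CMPQconst/CMPBconst variant matching the
// count's width sets the carry flag when y < 16, and SBBLcarrymask expands
// that carry into an all-ones or all-zero mask that the ANDL applies. A
// hedged, illustrative model for the 64-bit-count variant (rsh16uModel is
// not part of the generated rules):
func rsh16uModel(x uint16, y uint64) uint16 {
	if y >= 16 {
		return 0 // SBBLcarrymask produced a zero mask
	}
	return x >> y
}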
func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
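// The signed Rsh16x* rules above cannot simply zero the result: Go defines a
// signed shift by >= the width as sign fill. The expansion saturates the
// count instead: SBBLcarrymask followed by NOTL yields all-zero for in-range
// counts (leaving y unchanged through the ORL) and all-ones for out-of-range
// counts, which SARW then treats as a maximal shift that fills every bit with
// the sign bit. A hedged, illustrative model (rsh16xModel is not part of the
// generated rules):
func rsh16xModel(x int16, y uint64) int16 {
	if y >= 16 {
		y = 15 // saturated count: every result bit is a copy of the sign bit
	}
	return x >> y
}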
func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
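// Rsh8x8 above completes the shift lowerings; the Select0/Select1 rules that
// follow project the value and flags halves out of tuple-producing ops such
// as Add64carry. The Select0 (Add64carry ...) rule feeds the incoming 0-or-1
// carry through NEGLflags to regenerate the carry flag, then lets ADCQ add
// with carry. A hedged, illustrative model of the value half (add64carryModel
// is not part of the generated rules):
func add64carryModel(x, y, carry uint64) (sum, carryOut uint64) {
	sum = x + y
	if sum < x {
		carryOut = 1 // x + y wrapped
	}
	sum += carry
	if sum < carry {
		carryOut = 1 // adding the incoming carry wrapped
	}
	return
}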
func rewriteValueAMD64_OpSelect0(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select0 (Mul64uover x y))
	// result: (Select0 <typ.UInt64> (MULQU x y))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Mul32uover x y))
	// result: (Select0 <typ.UInt32> (MULLU x y))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt32
		v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Add64carry x y c))
	// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Sub64borrow x y c))
	// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select1 (Mul64uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Mul32uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Add64carry x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Sub64borrow x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (NEGLflags (MOVQconst [0])))
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		x := v_0_0_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
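// Select1 above materializes a carry-out as a 0-or-1 value by the inverse
// trick: SBBQcarrymask expands the carry flag into 0 or all ones, and NEGQ of
// that mask is 0 or 1. The NEGLflags rules are the cleanup for round trips:
// Select1 (NEGLflags (NEGQ (SBBQcarrymask x))) collapses back to the original
// flags x, and NEGLflags of constant 0 is known to produce FlagEQ. A hedged
// scalar model of the mask-to-bit step (carryMaskToBitModel is illustrative):
func carryMaskToBitModel(mask uint64) uint64 {
	// mask is 0 or ^uint64(0); negating (NEGQ) yields 0 or 1
	return -mask
}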
func rewriteValueAMD64_OpSelectN(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
	// cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
	// result: (Move [sc.Val64()] dst src mem)
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		call := v_0
		if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
			break
		}
		sym := auxToCall(call.Aux)
		s1 := call.Args[0]
		if s1.Op != OpAMD64MOVQstoreconst {
			break
		}
		sc := auxIntToValAndOff(s1.AuxInt)
		_ = s1.Args[1]
		s2 := s1.Args[1]
		if s2.Op != OpAMD64MOVQstore {
			break
		}
		_ = s2.Args[2]
		src := s2.Args[1]
		s3 := s2.Args[2]
		if s3.Op != OpAMD64MOVQstore {
			break
		}
		mem := s3.Args[2]
		dst := s3.Args[1]
		if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(sc.Val64())
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
	// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
	// result: (Move [sz] dst src mem)
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		call := v_0
		if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
			break
		}
		sym := auxToCall(call.Aux)
		mem := call.Args[3]
		dst := call.Args[0]
		src := call.Args[1]
		call_2 := call.Args[2]
		if call_2.Op != OpAMD64MOVQconst {
			break
		}
		sz := auxIntToInt64(call_2.AuxInt)
		if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(sz)
		v.AddArg3(dst, src, mem)
		return true
	}
	return false
}
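// Editor's note: Slicemask must yield 0 for a zero length and all ones
// otherwise. (SARQconst (NEGQ x) [63]) does this branch-free: for the
// non-negative lengths it is applied to, NEGQ sets the sign bit exactly when
// x is nonzero, and the arithmetic shift by 63 smears that bit across the word.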
func rewriteValueAMD64_OpSlicemask(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (Slicemask <t> x)
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v_0
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(63)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SpectreIndex <t> x y)
	// result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64CMOVQCC)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v1.AddArg2(x, y)
		v.AddArg3(x, v0, v1)
		return true
	}
}
func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SpectreSliceIndex <t> x y)
	// result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64CMOVQHI)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v1.AddArg2(x, y)
		v.AddArg3(x, v0, v1)
		return true
	}
}
func rewriteValueAMD64_OpStore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 8 && t.IsFloat()
	// result: (MOVSDstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 8 && t.IsFloat()) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 4 && t.IsFloat()
	// result: (MOVSSstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 4 && t.IsFloat()) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 8 && !t.IsFloat()
	// result: (MOVQstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 8 && !t.IsFloat()) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 4 && !t.IsFloat()
	// result: (MOVLstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 4 && !t.IsFloat()) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpTrunc(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Trunc x)
	// result: (ROUNDSD [3] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(3)
		v.AddArg(x)
		return true
	}
}
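// Editor's note: the Zero rules below choose a strategy by size: a single
// store-constant up to 8 bytes, a pair of possibly overlapping stores for
// sizes like 3, 5, 6, 7 (Zero [7] uses two 4-byte stores at offsets 0 and 3),
// 16-byte MOVO stores when SSE is usable, DUFFZERO for 16-byte multiples in
// (64, 1024], and REP STOSQ beyond that.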
func rewriteValueAMD64_OpZero(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Zero [0] _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_1
		v.copyOf(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%8)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 24 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v2.AddArg2(destptr, mem)
		v1.AddArg2(destptr, v2)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [9] destptr mem)
	// cond: config.useSSE
	// result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 9 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [10] destptr mem)
	// cond: config.useSSE
	// result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 10 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [11] destptr mem)
	// cond: config.useSSE
	// result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 11 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [12] destptr mem)
	// cond: config.useSSE
	// result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 12 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 12 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v2.AddArg2(destptr, mem)
		v1.AddArg2(destptr, v2)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg2(destptr, mem)
		return true
	}
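	// Editor's note: sizes too large or too awkward for Duff's device fall
	// back to REP STOSQ below, which stores s/8 zero quadwords from a
	// constant-zero register.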
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(s / 8)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = int64ToAuxInt(0)
		v.AddArg4(destptr, v0, v1, mem)
		return true
	}
	return false
}
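// Editor's note: rewriteBlockAMD64 applies the block rewrite rules: it
// strength-reduces single-bit TEST controls into BT/BTconst bit tests, folds
// known flag values (FlagEQ, FlagLT_ULT, ...) into unconditional First
// blocks, and cancels InvertFlags by flipping the branch condition.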
func rewriteBlockAMD64(b *Block) bool {
	typ := &b.Func.Config.Types
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// result: (UGE (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLL {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// result: (UGE (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLQ {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(int64(c))
		// result: (UGE (BTLconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTLconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint32PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64UGE, v0)
			return true
		}
		// match: (EQ (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(int64(c))
		// result: (UGE (BTQconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint64PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64UGE, v0)
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c)
		// result: (UGE (BTQconst [int8(log64(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64MOVQconst {
					continue
				}
				c := auxIntToInt64(v_0_0.AuxInt)
				x := v_0_1
				if !(isUint64PowerOfTwo(c)) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(int8(log64(c)))
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2
		// result: (UGE (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64UGE, v0)
				return true
			}
			break
		}
		// match: (EQ (InvertFlags cmp) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64EQ, cmp)
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
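		// Editor's note: BLSRQ/BLSRL (x & (x-1), BMI1) already set ZF from
		// their result, so when a TEST compares a BLSR's Select0 result
		// against itself the branch can reuse the flags half of the same
		// BLSR tuple.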
		// match: (EQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
		// result: (EQ (Select1 <types.TypeFlags> blsr) yes no)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				s := v_0_0
				if s.Op != OpSelect0 {
					continue
				}
				blsr := s.Args[0]
				if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
				v0.AddArg(blsr)
				b.resetWithControl(BlockAMD64EQ, v0)
				return true
			}
			break
		}
		// match: (EQ (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
		// result: (EQ (Select1 <types.TypeFlags> blsr) yes no)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				s := v_0_0
				if s.Op != OpSelect0 {
					continue
				}
				blsr := s.Args[0]
				if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
				v0.AddArg(blsr)
				b.resetWithControl(BlockAMD64EQ, v0)
				return true
			}
			break
		}
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64LE, cmp)
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64LT, cmp)
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
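	// Editor's note: generic If blocks are lowered by peeling a SETcc off the
	// control value and branching on the underlying flags; the floating-point
	// variants (SETGF, SETGEF, ...) map to unsigned conditions because the
	// float comparisons report their result in CF/ZF.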
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETL {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64LT, cmp)
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETLE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64LE, cmp)
			return true
		}
		// match: (If (SETG cmp) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETG {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64GT, cmp)
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64GE, cmp)
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64SETEQ {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64EQ, cmp)
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETNE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64NE, cmp)
			return true
		}
		// match: (If (SETB cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETB {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULT, cmp)
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETBE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULE, cmp)
			return true
		}
		// match: (If (SETA cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETA {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETAE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (If (SETO cmp) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64SETO {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64OS, cmp)
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGEF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64SETEQF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64EQF, cmp)
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64SETNEF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64NEF, cmp)
			return true
		}
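		// Editor's note: a boolean control that is not a recognized SETcc
		// falls through to (TESTB cond cond), turning the branch into a
		// plain non-zero test of the byte value.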
		// match: (If cond yes no)
		// result: (NE (TESTB cond cond) yes no)
		for {
			cond := b.Controls[0]
			v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg2(cond, cond)
			b.resetWithControl(BlockAMD64NE, v0)
			return true
		}
	case BlockJumpTable:
		// match: (JumpTable idx)
		// result: (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
		for {
			idx := b.Controls[0]
			v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
			v0.Aux = symToAux(makeJumpTableSym(b))
			v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
			v0.AddArg(v1)
			b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
			b.Aux = symToAux(makeJumpTableSym(b))
			return true
		}
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64GE, cmp)
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64GT, cmp)
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
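	// Editor's note: the first NE rules below undo the
	// (TESTB (SETcc cmp) (SETcc cmp)) shape produced by the generic If
	// lowering above: once both TESTB operands are the same SETcc, the
	// branch can consume cmp's flags directly.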
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64LT, cmp)
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64LE, cmp)
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64GT, cmp)
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64GE, cmp)
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64EQ, cmp)
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64NE, cmp)
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64ULT, cmp)
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64ULE, cmp)
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64OS, cmp)
			return true
		}
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// result: (ULT (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLL {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// result: (ULT (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64SHLQ {
					continue
				}
				x := v_0_0.Args[1]
				v_0_0_0 := v_0_0.Args[0]
				if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
					continue
				}
				y := v_0_1
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
				v0.AddArg2(x, y)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(int64(c))
		// result: (ULT (BTLconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTLconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint32PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64ULT, v0)
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(int64(c))
		// result: (ULT (BTQconst [int8(log32(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQconst {
			v_0 := b.Controls[0]
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			if !(isUint64PowerOfTwo(int64(c))) {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log32(c)))
			v0.AddArg(x)
			b.resetWithControl(BlockAMD64ULT, v0)
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c)
		// result: (ULT (BTQconst [int8(log64(c))] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				if v_0_0.Op != OpAMD64MOVQconst {
					continue
				}
				c := auxIntToInt64(v_0_0.AuxInt)
				x := v_0_1
				if !(isUint64PowerOfTwo(c)) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(int8(log64(c)))
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				z1_0 := z1.Args[0]
				if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
					continue
				}
				x := z1_0.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(0)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(63)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2
		// result: (ULT (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				z1 := v_0_0
				if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
					continue
				}
				x := z1.Args[0]
				z2 := v_0_1
				if !(z1 == z2) {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
				v0.AuxInt = int8ToAuxInt(31)
				v0.AddArg(x)
				b.resetWithControl(BlockAMD64ULT, v0)
				return true
			}
			break
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64EQF, cmp)
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
				break
			}
			b.resetWithControl(BlockAMD64NEF, cmp)
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64NE, cmp)
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
		// result: (NE (Select1 <types.TypeFlags> blsr) yes no)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				s := v_0_0
				if s.Op != OpSelect0 {
					continue
				}
				blsr := s.Args[0]
				if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
				v0.AddArg(blsr)
				b.resetWithControl(BlockAMD64NE, v0)
				return true
			}
			break
		}
		// match: (NE (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
		// result: (NE (Select1 <types.TypeFlags> blsr) yes no)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
				s := v_0_0
				if s.Op != OpSelect0 {
					continue
				}
				blsr := s.Args[0]
				if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
					continue
				}
				v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
				v0.AddArg(blsr)
				b.resetWithControl(BlockAMD64NE, v0)
				return true
			}
			break
		}
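	// Editor's note: TEST clears the carry flag, so an unsigned >= branch on
	// (TEST x x) is always taken; the UGE rules below fold it away.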
	case BlockAMD64UGE:
		// match: (UGE (TESTQ x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTL x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTW x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTW {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (TESTB x x) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (InvertFlags cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULE, cmp)
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64ULT, cmp)
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
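		// Informal note (hand-written, not generated): InvertFlags stands
		// for the same flags with the comparison operands swapped, so each
		// unsigned branch maps to its dual (UGE<->ULE, UGT<->ULT). The
		// Flag* control values encode statically known comparison outcomes,
		// e.g. FlagLT_ULT means "less than, both signed and unsigned", so a
		// branch on one of them folds to an unconditional BlockFirst, with
		// swapSuccessors selecting the "no" edge whenever the encoded
		// outcome contradicts the block's condition.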
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGE, cmp)
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (TESTQ x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTL x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTW x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTW {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (TESTB x x) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			if x != v_0.Args[0] {
				break
			}
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (InvertFlags cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockAMD64UGT, cmp)
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	}
	return false
}
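// Informal note (hand-written, not generated): the (UGE (TESTx x x)) and
// (ULT (TESTx x x)) rules above rely on TEST always clearing the carry
// flag, so an unsigned >= branch after TEST x,x is always taken and an
// unsigned < branch never is. As a sketch, a block of the form
//
//	If (ULT (TESTQ x x)) -> yes, no
//
// is rewritten to
//
//	First -> no, yes
//
// i.e. an unconditional transfer to the original "no" successor.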