// github.com/gagliardetto/golang-go@v0.0.0-20201020153340-53909ea70814/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "github.com/gagliardetto/golang-go/cmd/compile/internal/types"
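// rewriteValueAMD64 is the dispatcher for the AMD64 rewrite rules: it
// switches on v.Op and hands v to the rule functions generated for that
// opcode. Ops with many rules split them across numbered helpers (_0, _10,
// _20, ...), each holding up to ten rules, chained with || so the first rule
// that fires short-circuits the rest. It reports whether v was rewritten;
// the rewrite pass keeps re-applying it until no rule matches, so each rule
// only has to describe a single step of the simplification.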
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ_0(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst_0(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v) || rewriteValueAMD64_OpAMD64ADDLconst_10(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload_0(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry_0(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v) || rewriteValueAMD64_OpAMD64ADDQconst_10(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload_0(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload_0(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload_0(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst_0(v)
	case OpAMD64BTCLconstmodify:
		return rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v)
	case OpAMD64BTCLmodify:
		return rewriteValueAMD64_OpAMD64BTCLmodify_0(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst_0(v)
	case OpAMD64BTCQconstmodify:
		return rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v)
	case OpAMD64BTCQmodify:
		return rewriteValueAMD64_OpAMD64BTCQmodify_0(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst_0(v)
	case OpAMD64BTRLconstmodify:
		return rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v)
	case OpAMD64BTRLmodify:
		return rewriteValueAMD64_OpAMD64BTRLmodify_0(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst_0(v)
	case OpAMD64BTRQconstmodify:
		return rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v)
	case OpAMD64BTRQmodify:
		return rewriteValueAMD64_OpAMD64BTRQmodify_0(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst_0(v)
	case OpAMD64BTSLconstmodify:
		return rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v)
	case OpAMD64BTSLmodify:
		return rewriteValueAMD64_OpAMD64BTSLmodify_0(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst_0(v)
	case OpAMD64BTSQconstmodify:
		return rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v)
	case OpAMD64BTSQmodify:
		return rewriteValueAMD64_OpAMD64BTSQmodify_0(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS_0(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ_0(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE_0(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT_0(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI_0(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE_0(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS_0(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT_0(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE_0(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC_0(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE_0(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT_0(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI_0(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE_0(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS_0(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT_0(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE_0(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC_0(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS_0(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ_0(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE_0(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT_0(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI_0(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE_0(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS_0(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT_0(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload_0(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v) || rewriteValueAMD64_OpAMD64CMPLconst_10(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload_0(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload_0(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload_0(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload_0(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD_0(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload_0(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS_0(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload_0(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL_0(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU_0(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ_0(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1_0(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2_0(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4_0(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) || rewriteValueAMD64_OpAMD64MOVBstore_30(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v) || rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v) || rewriteValueAMD64_OpAMD64MOVLload_10(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v) || rewriteValueAMD64_OpAMD64MOVLstore_20(v) || rewriteValueAMD64_OpAMD64MOVLstore_30(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v) || rewriteValueAMD64_OpAMD64MULLconst_10(v) || rewriteValueAMD64_OpAMD64MULLconst_20(v) || rewriteValueAMD64_OpAMD64MULLconst_30(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v) || rewriteValueAMD64_OpAMD64MULQconst_30(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify_0(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload_0(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify_0(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload_0(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore_0(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore_0(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) || rewriteValueAMD64_OpAMD64SETEQ_20(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore_0(v) || rewriteValueAMD64_OpAMD64SETEQstore_10(v) || rewriteValueAMD64_OpAMD64SETEQstore_20(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore_0(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore_0(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) || rewriteValueAMD64_OpAMD64SETNE_20(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore_0(v) || rewriteValueAMD64_OpAMD64SETNEstore_10(v) || rewriteValueAMD64_OpAMD64SETNEstore_20(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload_0(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload_0(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify_0(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload_0(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify_0(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload_0(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpCeil:
		return rewriteValueAMD64_OpCeil_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect_0(v) || rewriteValueAMD64_OpCondSelect_10(v) || rewriteValueAMD64_OpCondSelect_20(v) || rewriteValueAMD64_OpCondSelect_30(v) || rewriteValueAMD64_OpCondSelect_40(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16_0(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero_0(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8_0(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA_0(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v) || rewriteValueAMD64_OpMove_20(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRotateLeft16:
		return rewriteValueAMD64_OpRotateLeft16_0(v)
	case OpRotateLeft32:
		return rewriteValueAMD64_OpRotateLeft32_0(v)
	case OpRotateLeft64:
		return rewriteValueAMD64_OpRotateLeft64_0(v)
	case OpRotateLeft8:
		return rewriteValueAMD64_OpRotateLeft8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpWB:
		return rewriteValueAMD64_OpWB_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
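// Each rule function below has the same generated shape: one for-block per
// rule (the loop is just a scoping device; every path ends in break or
// return true), preceded by comments carrying the rule over from
// gen/AMD64.rules:
//
//	// match:  the op tree to look for, binding names like x, c, carry
//	// cond:   an optional extra predicate over the bound names
//	// result: the replacement, built via v.reset(...), v.AuxInt, v.AddArg
//
// For instance, the first rule below rewrites an add-with-carry whose
// second operand is a 32-bit-safe constant, e.g. (ADCQ x (MOVQconst [7])
// carry), into the immediate form (ADCQconst x [7] carry).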
func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool {
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [c] carry)
	for {
		carry := v.Args[2]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADCQconst)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(carry)
		return true
	}
	// match: (ADCQ (MOVQconst [c]) x carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [c] carry)
	for {
		carry := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADCQconst)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(carry)
		return true
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst_0(v *Value) bool {
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
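// Commutative ops get each asymmetric rule emitted twice, once per argument
// order: (ADDL x (MOVLconst [c])) and (ADDL (MOVLconst [c]) x) below both
// rewrite to (ADDLconst [c] x). The generator expands commutativity into
// these mirrored copies instead of searching both orders at match time.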
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// result: (ADDLconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [3] y) x)
	// result: (LEAL8 x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
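// The rotate rules above rely on a left shift by c combined with a right
// shift by width-c of the same value being a rotate by c; the two shifted
// bit ranges cannot overlap, which is why ADDL works here as well as ORL
// would. For example
//
//	(ADDL (SHLLconst x [5]) (SHRLconst x [27]))   // 5 + 27 == 32
//
// becomes (ROLLconst x [5]), a single rotate instruction. The 16- and 8-bit
// variants also check c's range and t.Size(), since the shifts themselves
// are performed in the wider 32-bit register.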
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [2] y) x)
	// result: (LEAL4 x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (SHLLconst [1] y) x)
	// result: (LEAL2 x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		y := v_1.Args[1]
		if y != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (ADDL y y) x)
	// result: (LEAL2 x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		if y != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		y := v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (ADDL y x))
	// result: (LEAL2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL (ADDL x y) x)
	// result: (LEAL2 y x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDL (ADDL y x) x)
	// result: (LEAL2 y x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if x != v_0.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
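// The LEAL2/LEAL4/LEAL8 rules above are strength reductions into x86 scaled
// addressing: LEALk computes x + k*y in a single flag-preserving LEA. So
// (ADDL x (SHLLconst [2] y)), i.e. x + (y << 2), becomes (LEAL4 x y), which
// assembles to something like (registers illustrative)
//
//	LEAL (AX)(BX*4), CX   // CX = AX + 4*BX
//
// and (ADDL x (ADDL y y)) reuses the same encoding with scale 2.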
func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool {
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL y (ADDLconst [c] x))
	// result: (LEAL1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAL {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (LEAL [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// result: (SUBL x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
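// The two MOVLload rules above fold a load into the add's memory operand,
// (ADDL x (MOVLload ...)) -> (ADDLload x ...). Roughly speaking,
// canMergeLoadClobber(v, l, x) checks that the load l is used only here and
// can be folded into v without reordering memory operations, and clobber(l)
// marks the now-dead load so dead-code elimination removes it.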
func rewriteValueAMD64_OpAMD64ADDLconst_10(v *Value) bool {
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := v.AuxInt
		x := v.Args[0]
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool {
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
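// Illustration (editor's sketch, not generated code): the ...constmodify rules
// above carry both an immediate value and a memory offset in one AuxInt via
// ValAndOff. A minimal sketch of that packing, assuming the usual split of the
// value into the high 32 bits and the offset into the low 32 bits; the real
// type and its methods live in the ssa package:
type exampleValAndOff int64

func exampleMakeValAndOff(val, off int32) exampleValAndOff {
	return exampleValAndOff(int64(val)<<32 | int64(uint32(off)))
}

func (x exampleValAndOff) exampleVal() int32 { return int32(int64(x) >> 32) }
func (x exampleValAndOff) exampleOff() int32 { return int32(int64(x)) }

// A canAdd-style check: the rules only fire when the combined offset still
// fits in the 32-bit half, mirroring ValAndOff(valoff1).canAdd(off2).
func (x exampleValAndOff) exampleCanAdd(off2 int64) bool {
	sum := int64(x.exampleOff()) + off2
	return sum == int64(int32(sum))
}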
func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
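// Illustration (editor's sketch, not generated code): the MOVSSstore rule just
// above is store-to-load forwarding across register files. Instead of
// re-reading the float that was just stored at the same address, it
// reinterprets the float's bits as an integer (MOVLf2i). The Go-level
// analogue of that reinterpretation:
func exampleForwardF2I(x uint32, y float32) uint32 {
	// ADDLload x ptr (MOVSSstore ptr y _) computes x + bits(y)
	// without touching memory.
	return x + math.Float32bits(y)
}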
func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool {
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// result: (LEAQ8 x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// result: (LEAQ4 x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// result: (LEAQ2 x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
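// Illustration (editor's sketch, not generated code): the two ROLQconst rules
// in rewriteValueAMD64_OpAMD64ADDQ_0 rely on (x<<c) + (x>>(64-c)) being a
// left-rotate by c. ADD is a safe stand-in for OR here because the two shifted
// halves occupy disjoint bit ranges, so no carries can occur:
func exampleRotateViaAdd(x uint64, c uint) uint64 {
	// Valid for 0 < c < 64: the terms share no set bits, so the sum
	// equals the bitwise OR, i.e. a rotate left by c.
	return x<<c + x>>(64-c)
}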
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[1]
		if y != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// result: (LEAQ2 x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		if y != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// result: (LEAQ2 y x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// result: (LEAQ2 y x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if x != v_0.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
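// Illustration (editor's sketch, not generated code): the LEAQ1/LEAQ2 rules
// above fold add-of-add and add-of-shift shapes into a single scaled address
// computation, so that x + 2*y becomes one LEA rather than a shift followed by
// an add. The arithmetic being matched, in plain Go:
func exampleLea2(x, y uint64) uint64 {
	// ADDQ x (ADDQ y y)  ==  LEAQ2 x y  ==  x + y*2
	return x + y*2
}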
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// result: (SUBQ x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool {
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQcarry (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [c])
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
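// Illustration (editor's sketch, not generated code): ADDQ and ADDQcarry only
// fold a MOVQconst into an immediate when is32Bit(c) holds, because x86-64 ALU
// immediates are 32 bits sign-extended to 64. A sketch of that predicate:
func exampleIs32Bit(c int64) bool {
	// Exactly the values representable by a sign-extended 32-bit
	// immediate survive the round-trip through int32.
	return c == int64(int32(c))
}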
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_10(v *Value) bool {
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	for {
		off := v.AuxInt
		x := v.Args[0]
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool {
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool {
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
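// Illustration (editor's sketch, not generated code): ADDSDload's MOVQstore
// rule is the float-side mirror of the integer forwarding seen earlier; it
// replaces a reload of freshly stored integer bits with a direct bit
// reinterpretation (MOVQi2f). In plain Go terms:
func exampleForwardI2F(x float64, y uint64) float64 {
	// ADDSDload x ptr (MOVQstore ptr y _) computes
	// x + Float64frombits(y) without the memory round-trip.
	return x + math.Float64frombits(y)
}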
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NOTL {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		y := v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDL x (NOTL (SHLL (MOVLconst [1]) y)))
	// result: (BTRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NOTL {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		y := v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDL x (MOVLconst [c]))
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// result: (ANDLconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v.Args[1]
		if x != v.Args[0] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
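// Illustration (editor's sketch, not generated code): ANDL with a NOT of a
// shifted 1 is exactly "clear bit y of x", which the rules above turn into a
// single BTRL instruction. The shape being matched, at the Go level:
func exampleClearBit(x, y uint32) uint32 {
	// ANDL (NOTL (SHLL (MOVLconst [1]) y)) x  ==  BTRL x y;
	// the shift count is taken mod 32, as the hardware does.
	return x &^ (1 << (y & 31))
}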
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	// match: (ANDLconst [c] x)
	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRLconst [log2uint32(^c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = log2uint32(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (BTRLconst [d] x))
	// result: (ANDLconst [c &^ (1<<uint32(d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c &^ (1 << uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
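// Illustration (editor's sketch, not generated code): the 0xFF and 0xFFFF
// rules above replace an AND with a zero-extending move, since masking the low
// bits and zero-extending a narrower value compute the same function:
func exampleMaskIsZeroExtend(x uint32) bool {
	// ANDLconst [0xFF] x == MOVBQZX x; this property always holds.
	return x&0xFF == uint32(uint8(x))
}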
func rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v *Value) bool {
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLmodify_0(v *Value) bool {
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NOTQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		y := v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDQ x (NOTQ (SHLQ (MOVQconst [1]) y)))
	// result: (BTRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NOTQ {
			break
		}
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		y := v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRQconst [log2(^c)] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRQconst [log2(^c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v.Args[1]
		if x != v.Args[0] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
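// Illustration (editor's sketch, not generated code): the BTRQconst rules in
// ANDQ_0 matter because even a 64-bit AND takes only a sign-extended 32-bit
// immediate. Clearing a single high bit, say bit 40, cannot be expressed as
// ANDQconst, but BTRQconst [40] does it in one instruction:
func exampleHighBitClear(x uint64) uint64 {
	// Here c = ^uint64(1<<40): ^c is a power of two with uint64(^c) >= 128,
	// so the rules rewrite this AND into BTRQconst [40].
	return x &^ (1 << 40)
}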
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRQconst [log2(^c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = log2(^c)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (BTRQconst [d] x))
	// result: (ANDQconst [c &^ (1<<uint32(d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c &^ (1 << uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [ 0xFF] x)
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v *Value) bool {
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmodify_0(v *Value) bool {
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
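// Illustration (editor's sketch, not generated code): the BSFQ rules above
// keep a sentinel bit (1<<8 or 1<<16) ORed in above a zero-extended narrow
// value, so BSF has a defined answer even when the input is zero; the rules
// merely drop a zero extension made redundant by the sentinel. The trick in
// plain Go, as a software count-trailing-zeros for a byte:
func exampleCtz8(x uint8) int {
	// With the sentinel at bit 8, a zero byte yields 8 instead of an
	// undefined BSF result.
	v := uint64(x) | 1<<8
	n := 0
	for v&1 == 0 {
		v >>= 1
		n++
	}
	return n
}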
func rewriteValueAMD64_OpAMD64BTCLconst_0(v *Value) bool {
	// match: (BTCLconst [c] (XORLconst [d] x))
	// result: (XORLconst [d ^ 1<<uint32(c)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = d ^ 1<<uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (BTCLconst [d] x))
	// result: (XORLconst [1<<uint32(c) ^ 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d^(1<<uint32(c))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = d ^ (1 << uint32(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v *Value) bool {
	// match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCLmodify_0(v *Value) bool {
	// match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTCLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
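// Illustration (editor's sketch, not generated code): BTC complements one bit,
// i.e. XOR with 1<<c, which is why BTCLconst_0 can fuse it with XORLconst and
// fold two BTCs into a single XOR constant:
func exampleToggleTwice(x uint32, c, d uint) uint32 {
	// BTCLconst [c] (BTCLconst [d] x) == XORLconst [1<<c ^ 1<<d] x
	// (bit indices taken mod 32).
	return x ^ (1<<(c&31) ^ 1<<(d&31))
}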
func rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v *Value) bool {
	// match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
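// The constmodify rules carry both an immediate value and a memory offset in
// one AuxInt via ValAndOff, which roughly packs the value into the high 32
// bits and the offset into the low 32 bits; canAdd/add only succeed while
// the adjusted offset still fits. A rough sketch of that packing, with
// hypothetical helper names (not the ssa package's own implementation):
func packValAndOff(val, off int32) int64 {
	return int64(val)<<32 | int64(uint32(off)) // val high, off low
}
func unpackOff(vo int64) int32 {
	return int32(vo) // low 32 bits hold the offset
}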
func rewriteValueAMD64_OpAMD64BTCLmodify_0(v *Value) bool {
	// match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTCLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCQconst_0(v *Value) bool {
	// match: (BTCQconst [c] (XORQconst [d] x))
	// result: (XORQconst [d ^ 1<<uint32(c)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = d ^ 1<<uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (BTCQconst [d] x))
	// result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d ^ (1 << uint32(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v *Value) bool {
	// match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTCQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTCQmodify_0(v *Value) bool {
	// match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTCQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64BTCQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTCQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTLconst_0(v *Value) bool {
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c - d
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if v.AuxInt != 0 {
			break
		}
		s := v.Args[0]
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d)<32
	// result: (BTLconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c - d
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x)
	for {
		if v.AuxInt != 0 {
			break
		}
		s := v.Args[0]
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
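// The BT-with-shift rules above use the fact that bit c of x>>d is bit c+d
// of x, valid while c+d stays inside the operand width (hence the (c+d)<64
// and (c+d)<32 conds). A quick check of the identity, with a hypothetical
// helper name:
func btShiftFold(x, c, d uint64) bool {
	if c+d >= 64 {
		return true // vacuously true; the rule's cond rejects this range
	}
	return (x>>d>>c)&1 == (x>>(c+d))&1 // always equal in range
}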
func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTQconst [c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = c - d
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if v.AuxInt != 0 {
			break
		}
		s := v.Args[0]
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRLconst_0(v *Value) bool {
	// match: (BTRLconst [c] (BTSLconst [c] x))
	// result: (BTRLconst [c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSLconst || v_0.AuxInt != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (BTCLconst [c] x))
	// result: (BTRLconst [c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCLconst || v_0.AuxInt != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [d &^ (1<<uint32(c))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = d &^ (1 << uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (BTRLconst [d] x))
	// result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d&^(1<<uint32(c))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = d &^ (1 << uint32(c))
		return true
	}
	return false
}
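// BTRLconst resets (clears) bit c, i.e. x &^ (1<<c). That makes an earlier
// BTS or BTC of the same bit dead (the first two rules above) and lets BTR
// fold into an existing AND mask (the later rules). A small check of the
// dead-store argument, using a hypothetical helper name:
func btrAbsorbsBts(x int32, c uint32) bool {
	viaBoth := (x | 1<<c) &^ (1 << c) // BTRLconst [c] (BTSLconst [c] x)
	viaBTR := x &^ (1 << c)           // BTRLconst [c] x
	return viaBoth == viaBTR          // always true: the clear wins
}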
func rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v *Value) bool {
	// match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTRLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTRLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRLmodify_0(v *Value) bool {
	// match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTRLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool {
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSQconst || v_0.AuxInt != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCQconst || v_0.AuxInt != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [d &^ (1<<uint32(c))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = d &^ (1 << uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTRQconst [d] x))
	// result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d&^(1<<uint32(c))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d &^ (1 << uint32(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v *Value) bool {
	// match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTRQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTRQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRQmodify_0(v *Value) bool {
	// match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTRQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64BTRQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTRQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool {
	// match: (BTSLconst [c] (BTRLconst [c] x))
	// result: (BTSLconst [c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRLconst || v_0.AuxInt != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (BTCLconst [c] x))
	// result: (BTSLconst [c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCLconst || v_0.AuxInt != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (ORLconst [d] x))
	// result: (ORLconst [d | 1<<uint32(c)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = d | 1<<uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (BTSLconst [d] x))
	// result: (ORLconst [1<<uint32(d) | 1<<uint32(c)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d|(1<<uint32(c))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = d | (1 << uint32(c))
		return true
	}
	return false
}
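// BTSLconst sets bit c, i.e. x | 1<<c, so consecutive bit-sets collapse into
// a single OR mask (the fourth rule above). A quick check of that collapse,
// using a hypothetical helper name:
func btsFoldsToOr(x int32, c, d uint32) bool {
	viaBTS := (x | 1<<d) | 1<<c // BTSLconst [c] (BTSLconst [d] x)
	viaOR := x | (1<<d | 1<<c)  // the rewritten ORLconst mask
	return viaBTS == viaOR      // OR is associative and commutative
}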
func rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v *Value) bool {
	// match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTSLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTSLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSLmodify_0(v *Value) bool {
	// match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTSLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool {
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTRQconst || v_0.AuxInt != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCQconst || v_0.AuxInt != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (ORQconst [d] x))
	// result: (ORQconst [d | 1<<uint32(c)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = d | 1<<uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTSQconst [d] x))
	// result: (ORQconst [1<<uint32(d) | 1<<uint32(c)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d | (1 << uint32(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v *Value) bool {
	// match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64BTSQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTSQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQmodify_0(v *Value) bool {
	// match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTSQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64BTSQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64BTSQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
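// InvertFlags marks a comparison whose operands were swapped, so each CMOV
// rule above and below replaces the condition with its operand-swapped dual:
// CC (carry clear, unsigned >=) pairs with LS (unsigned <=), CS with HI, and
// so on. The underlying relation, sketched with ordinary unsigned integers
// and a hypothetical helper name:
func invertFlagsDual(a, b uint64) bool {
	return (a >= b) == (b <= a) // swapping operands flips the comparison
}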
func rewriteValueAMD64_OpAMD64CMOVLCS_0(v *Value) bool {
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLHI)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
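// The Flag* constants record a statically known comparison result along both
// signedness axes: FlagGT_ULT, for example, means greater-than as a signed
// comparison but less-than as an unsigned one. With the flags known, each
// CMOV rule reduces to a plain copy of whichever operand the condition
// selects (the second when it holds, the first otherwise). Sketched in
// ordinary Go with a hypothetical helper name:
func cmovWithKnownFlags(y, x int32, condHolds bool) int32 {
	if condHolds {
		return x // the rules rewrite to (Copy x)
	}
	return y // condition statically false: (Copy y)
}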
func rewriteValueAMD64_OpAMD64CMOVLEQ_0(v *Value) bool {
	// match: (CMOVLEQ x y (InvertFlags cond))
	// result: (CMOVLEQ x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLEQ _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGE_0(v *Value) bool {
	// match: (CMOVLGE x y (InvertFlags cond))
	// result: (CMOVLLE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLGE _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGT_0(v *Value) bool {
	// match: (CMOVLGT x y (InvertFlags cond))
	// result: (CMOVLLT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLGT y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLHI_0(v *Value) bool {
	// match: (CMOVLHI x y (InvertFlags cond))
	// result: (CMOVLCS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLHI y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLHI y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLHI y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLE_0(v *Value) bool {
	// match: (CMOVLLE x y (InvertFlags cond))
	// result: (CMOVLGE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLLE _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLS_0(v *Value) bool {
	// match: (CMOVLLS x y (InvertFlags cond))
	// result: (CMOVLCC x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCC)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLLS _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLLS _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLLS _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLT_0(v *Value) bool {
	// match: (CMOVLLT x y (InvertFlags cond))
	// result: (CMOVLGT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLLT y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLNE_0(v *Value) bool {
	// match: (CMOVLNE x y (InvertFlags cond))
	// result: (CMOVLNE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLNE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVLNE y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCC_0(v *Value) bool {
	// match: (CMOVQCC x y (InvertFlags cond))
	// result: (CMOVQLS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQCC _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQCC _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQCC y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQCC y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQCC _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCS_0(v *Value) bool {
	// match: (CMOVQCS x y (InvertFlags cond))
	// result: (CMOVQHI x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQHI)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQCS y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQCS y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQCS _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQCS _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQCS y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
	// match: (CMOVQEQ x y (InvertFlags cond))
	// result: (CMOVQEQ x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQEQ _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
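// The last CMOVQEQ rule above relies on BSF's flag behavior: BSFQ sets ZF
// only when its source is zero, and ORing in a nonzero constant c guarantees
// a nonzero source, so the EQ arm can never be taken and the CMOV reduces to
// its first operand. Sketch of the invariant, with a hypothetical helper
// name:
func bsfSourceNonZero(src, c uint64) bool {
	if c == 0 {
		return false // the rule's cond excludes this case
	}
	return src|c != 0 // always true, so ZF is never set
}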
func rewriteValueAMD64_OpAMD64CMOVQGE_0(v *Value) bool {
	// match: (CMOVQGE x y (InvertFlags cond))
	// result: (CMOVQLE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQGE _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGT_0(v *Value) bool {
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQHI_0(v *Value) bool {
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLE_0(v *Value) bool {
	// match: (CMOVQLE x y (InvertFlags cond))
	// result: (CMOVQGE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQLE _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLS_0(v *Value) bool {
	// match: (CMOVQLS x y (InvertFlags cond))
	// result: (CMOVQCC x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCC)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQLS _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLS _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLS _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLT_0(v *Value) bool {
	// match: (CMOVQLT x y (InvertFlags cond))
	// result: (CMOVQGT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQLT y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVQNE y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCC_0(v *Value) bool {
	// match: (CMOVWCC x y (InvertFlags cond))
	// result: (CMOVWLS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWCC _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWCC _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWCC y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWCC y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWCC _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCS_0(v *Value) bool {
	// match: (CMOVWCS x y (InvertFlags cond))
	// result: (CMOVWHI x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWHI)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWCS y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWCS y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWCS _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWCS _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWCS y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWEQ_0(v *Value) bool {
	// match: (CMOVWEQ x y (InvertFlags cond))
	// result: (CMOVWEQ x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWEQ _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGE_0(v *Value) bool {
	// match: (CMOVWGE x y (InvertFlags cond))
	// result: (CMOVWLE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWGE _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGT_0(v *Value) bool {
	// match: (CMOVWGT x y (InvertFlags cond))
	// result: (CMOVWLT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWGT y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWHI_0(v *Value) bool {
	// match: (CMOVWHI x y (InvertFlags cond))
	// result: (CMOVWCS x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCS)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWHI y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWHI y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWHI y _ (FlagLT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLE_0(v *Value) bool {
	// match: (CMOVWLE x y (InvertFlags cond))
	// result: (CMOVWGE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWLE _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLS_0(v *Value) bool {
	// match: (CMOVWLS x y (InvertFlags cond))
	// result: (CMOVWCC x y cond)
	for {
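		// The blank read of v.Args[2] below is an idiom of the rule
		// generator; it appears to exist so the compiler can prove that
		// the later indexed accesses to v.Args are in bounds (assumption,
		// not documented in this file).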
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCC)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWLS _ x (FlagEQ))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLS _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLS _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagLT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLT_0(v *Value) bool {
	// match: (CMOVWLT x y (InvertFlags cond))
	// result: (CMOVWGT x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGT)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWLT y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_UGT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_ULT))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWNE_0(v *Value) bool {
	// match: (CMOVWNE x y (InvertFlags cond))
	// result: (CMOVWNE x y cond)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWNE)
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(cond)
		return true
	}
	// match: (CMOVWNE y _ (FlagEQ))
	// result: y
	for {
		_ = v.Args[2]
		y := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpCopy)
		v.Type = y.Type
		v.AddArg(y)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_ULT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_UGT))
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
	b := v.Block
	// match: (CMPB x (MOVLconst [c]))
	// result: (CMPBconst x [int64(int8(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPBload {sym} [off] ptr x mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(x)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
	b := v.Block
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := v.AuxInt
		l := v.Args[0]
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(c, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconstload_0(v *Value) bool {
	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBload_0(v *Value) bool {
	// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPBload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
	// cond: validValAndOff(int64(int8(c)),off)
	// result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(validValAndOff(int64(int8(c)), off)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = makeValAndOff(int64(int8(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
	b := v.Block
	// match: (CMPL x (MOVLconst [c]))
	// result: (CMPLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPLload {sym} [off] ptr x mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(x)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_10(v *Value) bool {
	b := v.Block
	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := v.AuxInt
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(c, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconstload_0(v *Value) bool {
	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLload_0(v *Value) bool {
	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPLload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
	b := v.Block
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
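	// (The is32Bit guard is needed because the x86-64 CMP immediate form
	// only encodes a sign-extended 32-bit constant; this explanatory
	// comment is an editorial addition, not generator output.)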
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPQload {sym} [off] ptr x mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(x)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst || v_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0.AuxInt != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst || v_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0.AuxInt != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX || !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
	b := v.Block
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := v.AuxInt
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(c, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconstload_0(v *Value) bool {
	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQload_0(v *Value) bool {
	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPQload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
	b := v.Block
	// match: (CMPW x (MOVLconst [c]))
	// result: (CMPWconst x [int64(int16(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int64(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPWload {sym} [off] ptr x mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(x)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
	b := v.Block
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := v.AuxInt
		l := v.Args[0]
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
			break
		}
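		// Per the @l.Block directive in the rule above, the replacement
		// comparison is built in the load's block, and v is then reset to
		// a copy of that new value.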
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = makeValAndOff(c, off)
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconstload_0(v *Value) bool {
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWload_0(v *Value) bool {
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// cond: validValAndOff(int64(int16(c)),off)
	// result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(validValAndOff(int64(int16(c)), off)) {
			break
		}
func rewriteValueAMD64_OpAMD64CMPWload_0(v *Value) bool {
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// cond: validValAndOff(int64(int16(c)),off)
	// result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(validValAndOff(int64(int16(c)), off)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
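// DIVSD and DIVSS fold a one-use load of the divisor into the divide
// itself, turning x / *p into a single DIVSDload. canMergeLoadClobber
// checks that the merge is safe, and clobber marks the load dead so it
// can be removed. Only the second argument is matched: division, unlike
// the commutative ops, cannot swap its operands to reach the load.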
func rewriteValueAMD64_OpAMD64DIVSD_0(v *Value) bool {
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSDload_0(v *Value) bool {
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSS_0(v *Value) bool {
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSSload_0(v *Value) bool {
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULL_0(v *Value) bool {
	// match: (HMULL x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULL y x)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULL)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULLU_0(v *Value) bool {
	// match: (HMULLU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULLU y x)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULLU)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
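// The high-multiply ops are commutative, so the rules above and below
// canonicalize operand order: when only the second argument is
// rematerializeable (cheap for the register allocator to recompute where
// needed, e.g. a constant), the two are swapped so the rematerializeable
// value comes first. The same swap appears for all four width/sign
// variants.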
func rewriteValueAMD64_OpAMD64HMULQ_0(v *Value) bool {
	// match: (HMULQ x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQ y x)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQ)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULQU_0(v *Value) bool {
	// match: (HMULQU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQU y x)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQU)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAL [c] {s} (ADDL x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool {
	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} y (ADDLconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [1] y) x)
	// result: (LEAL2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [2] y) x)
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL1 [c] {s} (SHLLconst [3] y) x)
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
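// LEALn computes base + n*index + displacement in one instruction, so a
// shifted index folds into a larger scale: x + (y<<1) becomes LEAL2,
// x + (y<<2) becomes LEAL4, and x + (y<<3) becomes LEAL8. In the LEAL2
// rules below, note that a constant folded out of the index is scaled
// into the displacement (c+2*d) while one folded out of the base is not
// (c+d), matching how the address is computed.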
func rewriteValueAMD64_OpAMD64LEAL2_0(v *Value) bool {
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL4_0(v *Value) bool {
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL8_0(v *Value) bool {
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
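// LEAQ1 is the 64-bit analogue of LEAL1: constants fold into the
// displacement from either operand position, a SHLQconst index upgrades
// the scale, and a nested LEAQ can be absorbed from either side provided
// the combined offset stays 32-bit, the symbols merge, and the inner
// base is not the SB pseudo-register.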
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
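// Scales are limited to 1, 2, 4, and 8, so LEAQ2 can only absorb index
// shifts of 1 (giving LEAQ4) and 2 (giving LEAQ8); there is no rule for
// a shift of 3, which would need the nonexistent scale 16.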
func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
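// MOVBQSXload's first rule below is store-to-load forwarding: a byte
// load that reads exactly what a byte store just wrote (same symbol,
// same offset, and isSamePtr, with the store providing the memory state)
// is replaced by a sign extension of the stored value, skipping memory
// entirely.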
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x)
	// cond: zeroUpper56Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper56Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
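// Even atomic loads can fold constant address arithmetic: adding a
// compile-time constant into the pointer does not change which address
// is accessed or the ordering semantics, only how the address is
// encoded in the instruction.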
func rewriteValueAMD64_OpAMD64MOVBatomicload_0(v *Value) bool {
	// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int64(read8(sym, off))])
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(read8(sym, off))
		return true
	}
	return false
}
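// MOVBloadidx1 treats its two address operands symmetrically (ptr+idx is
// commutative), so the rules below fold an ADDQconst out of either
// operand, and a constant operand collapses the indexed load back into a
// plain MOVBload with the constant folded into the offset. For example,
// (MOVBloadidx1 [4] {s} p (MOVQconst [12]) mem) becomes
// (MOVBload [16] {s} p mem).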
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVBload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		mem := v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [i] {s} (MOVQconst [c]) p mem)
	// cond: is32Bit(i+c)
	// result: (MOVBload [i+c] {s} p mem)
	for {
		i := v.AuxInt
		s := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		p := v.Args[1]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	return false
}
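// A byte store of a one-use SETcc result fuses into a single SETccstore
// op, writing the condition byte straight from the flags to memory
// instead of materializing it in a register first. The ten rules below
// cover the signed, unsigned, and equality predicates.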
func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
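// The _10 continuation first drops MOVBQSX/MOVBQZX on the stored value,
// since a byte store ignores the upper bits, and then combines adjacent
// byte stores that spell out a wider value: storing w at [i] and w>>8 at
// [i-1] writes the two bytes of w in byte-reversed order, so it becomes a
// single (MOVWstore [i-1] p (ROLWconst [8] w) mem); four such stores of
// w, w>>8, w>>16, w>>24 become one (MOVLstore [i-3] p (BSWAPL w) mem).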
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12358 break 12359 } 12360 v.reset(OpAMD64MOVBstore) 12361 v.AuxInt = off1 + off2 12362 v.Aux = mergeSym(sym1, sym2) 12363 v.AddArg(base) 12364 v.AddArg(val) 12365 v.AddArg(mem) 12366 return true 12367 } 12368 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 12369 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12370 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 12371 for { 12372 off1 := v.AuxInt 12373 sym1 := v.Aux 12374 mem := v.Args[2] 12375 v_0 := v.Args[0] 12376 if v_0.Op != OpAMD64LEAQ1 { 12377 break 12378 } 12379 off2 := v_0.AuxInt 12380 sym2 := v_0.Aux 12381 idx := v_0.Args[1] 12382 ptr := v_0.Args[0] 12383 val := v.Args[1] 12384 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12385 break 12386 } 12387 v.reset(OpAMD64MOVBstoreidx1) 12388 v.AuxInt = off1 + off2 12389 v.Aux = mergeSym(sym1, sym2) 12390 v.AddArg(ptr) 12391 v.AddArg(idx) 12392 v.AddArg(val) 12393 v.AddArg(mem) 12394 return true 12395 } 12396 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 12397 // cond: ptr.Op != OpSB 12398 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 12399 for { 12400 off := v.AuxInt 12401 sym := v.Aux 12402 mem := v.Args[2] 12403 v_0 := v.Args[0] 12404 if v_0.Op != OpAMD64ADDQ { 12405 break 12406 } 12407 idx := v_0.Args[1] 12408 ptr := v_0.Args[0] 12409 val := v.Args[1] 12410 if !(ptr.Op != OpSB) { 12411 break 12412 } 12413 v.reset(OpAMD64MOVBstoreidx1) 12414 v.AuxInt = off 12415 v.Aux = sym 12416 v.AddArg(ptr) 12417 v.AddArg(idx) 12418 v.AddArg(val) 12419 v.AddArg(mem) 12420 return true 12421 } 12422 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 12423 // cond: x0.Uses == 1 && clobber(x0) 12424 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 12425 for { 12426 i := v.AuxInt 12427 s := v.Aux 12428 _ = v.Args[2] 12429 p := v.Args[0] 12430 w := v.Args[1] 12431 x0 := v.Args[2] 12432 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s { 12433 break 12434 } 12435 mem := x0.Args[2] 12436 if p != x0.Args[0] { 12437 break 12438 } 12439 x0_1 := x0.Args[1] 12440 if x0_1.Op != OpAMD64SHRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) { 12441 break 12442 } 12443 v.reset(OpAMD64MOVWstore) 12444 v.AuxInt = i - 1 12445 v.Aux = s 12446 v.AddArg(p) 12447 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type) 12448 v0.AuxInt = 8 12449 v0.AddArg(w) 12450 v.AddArg(v0) 12451 v.AddArg(mem) 12452 return true 12453 } 12454 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 12455 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 12456 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 12457 for { 12458 i := v.AuxInt 12459 s := v.Aux 12460 _ = v.Args[2] 12461 p := v.Args[0] 12462 w := v.Args[1] 12463 x2 := v.Args[2] 12464 if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-1 || x2.Aux != s { 12465 break 12466 } 12467 _ = x2.Args[2] 12468 if p != x2.Args[0] { 12469 break 12470 } 12471 x2_1 := x2.Args[1] 12472 if x2_1.Op != OpAMD64SHRLconst || x2_1.AuxInt != 8 || w != x2_1.Args[0] { 12473 break 12474 } 12475 x1 := x2.Args[2] 12476 if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s { 12477 break 12478 } 12479 _ = x1.Args[2] 12480 if p != x1.Args[0] { 12481 break 12482 } 12483 x1_1 := x1.Args[1] 12484 if x1_1.Op 
!= OpAMD64SHRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] { 12485 break 12486 } 12487 x0 := x1.Args[2] 12488 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-3 || x0.Aux != s { 12489 break 12490 } 12491 mem := x0.Args[2] 12492 if p != x0.Args[0] { 12493 break 12494 } 12495 x0_1 := x0.Args[1] 12496 if x0_1.Op != OpAMD64SHRLconst || x0_1.AuxInt != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 12497 break 12498 } 12499 v.reset(OpAMD64MOVLstore) 12500 v.AuxInt = i - 3 12501 v.Aux = s 12502 v.AddArg(p) 12503 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type) 12504 v0.AddArg(w) 12505 v.AddArg(v0) 12506 v.AddArg(mem) 12507 return true 12508 } 12509 return false 12510 } 12511 func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { 12512 b := v.Block 12513 typ := &b.Func.Config.Types 12514 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 12515 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 12516 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 12517 for { 12518 i := v.AuxInt 12519 s := v.Aux 12520 _ = v.Args[2] 12521 p := v.Args[0] 12522 w := v.Args[1] 12523 x6 := v.Args[2] 12524 if x6.Op != OpAMD64MOVBstore || x6.AuxInt != i-1 || x6.Aux != s { 12525 break 12526 } 12527 _ = x6.Args[2] 12528 if p != x6.Args[0] { 12529 break 12530 } 12531 x6_1 := x6.Args[1] 12532 if x6_1.Op != OpAMD64SHRQconst || x6_1.AuxInt != 8 || w != x6_1.Args[0] { 12533 break 12534 } 12535 x5 := x6.Args[2] 12536 if x5.Op != OpAMD64MOVBstore || x5.AuxInt != i-2 || x5.Aux != s { 12537 break 12538 } 12539 _ = x5.Args[2] 12540 if p != x5.Args[0] { 12541 break 12542 } 12543 x5_1 := x5.Args[1] 12544 if x5_1.Op != OpAMD64SHRQconst || x5_1.AuxInt != 16 || w != x5_1.Args[0] { 12545 break 12546 } 12547 x4 := x5.Args[2] 12548 if x4.Op != OpAMD64MOVBstore || x4.AuxInt != i-3 || x4.Aux != s { 12549 break 12550 } 12551 _ = x4.Args[2] 12552 if p != x4.Args[0] { 12553 break 12554 } 12555 x4_1 := x4.Args[1] 12556 if x4_1.Op != OpAMD64SHRQconst || x4_1.AuxInt != 24 || w != x4_1.Args[0] { 12557 break 12558 } 12559 x3 := x4.Args[2] 12560 if x3.Op != OpAMD64MOVBstore || x3.AuxInt != i-4 || x3.Aux != s { 12561 break 12562 } 12563 _ = x3.Args[2] 12564 if p != x3.Args[0] { 12565 break 12566 } 12567 x3_1 := x3.Args[1] 12568 if x3_1.Op != OpAMD64SHRQconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] { 12569 break 12570 } 12571 x2 := x3.Args[2] 12572 if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-5 || x2.Aux != s { 12573 break 12574 } 12575 _ = x2.Args[2] 12576 if p != x2.Args[0] { 12577 break 12578 } 12579 x2_1 := x2.Args[1] 12580 if x2_1.Op != OpAMD64SHRQconst || x2_1.AuxInt != 40 || w != x2_1.Args[0] { 12581 break 12582 } 12583 x1 := x2.Args[2] 12584 if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i-6 || x1.Aux != s { 12585 break 12586 } 12587 _ = x1.Args[2] 12588 if p != x1.Args[0] { 12589 break 12590 } 12591 x1_1 := x1.Args[1] 12592 if x1_1.Op != OpAMD64SHRQconst || x1_1.AuxInt != 48 || w != x1_1.Args[0] { 12593 break 12594 } 12595 x0 := x1.Args[2] 12596 if x0.Op != OpAMD64MOVBstore || 
x0.AuxInt != i-7 || x0.Aux != s { 12597 break 12598 } 12599 mem := x0.Args[2] 12600 if p != x0.Args[0] { 12601 break 12602 } 12603 x0_1 := x0.Args[1] 12604 if x0_1.Op != OpAMD64SHRQconst || x0_1.AuxInt != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 12605 break 12606 } 12607 v.reset(OpAMD64MOVQstore) 12608 v.AuxInt = i - 7 12609 v.Aux = s 12610 v.AddArg(p) 12611 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type) 12612 v0.AddArg(w) 12613 v.AddArg(v0) 12614 v.AddArg(mem) 12615 return true 12616 } 12617 // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 12618 // cond: x.Uses == 1 && clobber(x) 12619 // result: (MOVWstore [i-1] {s} p w mem) 12620 for { 12621 i := v.AuxInt 12622 s := v.Aux 12623 _ = v.Args[2] 12624 p := v.Args[0] 12625 v_1 := v.Args[1] 12626 if v_1.Op != OpAMD64SHRWconst || v_1.AuxInt != 8 { 12627 break 12628 } 12629 w := v_1.Args[0] 12630 x := v.Args[2] 12631 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { 12632 break 12633 } 12634 mem := x.Args[2] 12635 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) { 12636 break 12637 } 12638 v.reset(OpAMD64MOVWstore) 12639 v.AuxInt = i - 1 12640 v.Aux = s 12641 v.AddArg(p) 12642 v.AddArg(w) 12643 v.AddArg(mem) 12644 return true 12645 } 12646 // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 12647 // cond: x.Uses == 1 && clobber(x) 12648 // result: (MOVWstore [i-1] {s} p w mem) 12649 for { 12650 i := v.AuxInt 12651 s := v.Aux 12652 _ = v.Args[2] 12653 p := v.Args[0] 12654 v_1 := v.Args[1] 12655 if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 8 { 12656 break 12657 } 12658 w := v_1.Args[0] 12659 x := v.Args[2] 12660 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { 12661 break 12662 } 12663 mem := x.Args[2] 12664 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) { 12665 break 12666 } 12667 v.reset(OpAMD64MOVWstore) 12668 v.AuxInt = i - 1 12669 v.Aux = s 12670 v.AddArg(p) 12671 v.AddArg(w) 12672 v.AddArg(mem) 12673 return true 12674 } 12675 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 12676 // cond: x.Uses == 1 && clobber(x) 12677 // result: (MOVWstore [i-1] {s} p w mem) 12678 for { 12679 i := v.AuxInt 12680 s := v.Aux 12681 _ = v.Args[2] 12682 p := v.Args[0] 12683 v_1 := v.Args[1] 12684 if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 8 { 12685 break 12686 } 12687 w := v_1.Args[0] 12688 x := v.Args[2] 12689 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { 12690 break 12691 } 12692 mem := x.Args[2] 12693 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) { 12694 break 12695 } 12696 v.reset(OpAMD64MOVWstore) 12697 v.AuxInt = i - 1 12698 v.Aux = s 12699 v.AddArg(p) 12700 v.AddArg(w) 12701 v.AddArg(mem) 12702 return true 12703 } 12704 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem)) 12705 // cond: x.Uses == 1 && clobber(x) 12706 // result: (MOVWstore [i] {s} p w mem) 12707 for { 12708 i := v.AuxInt 12709 s := v.Aux 12710 _ = v.Args[2] 12711 p := v.Args[0] 12712 w := v.Args[1] 12713 x := v.Args[2] 12714 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { 12715 break 12716 } 12717 mem := x.Args[2] 12718 if p != x.Args[0] { 12719 break 12720 } 12721 x_1 := x.Args[1] 12722 if x_1.Op != OpAMD64SHRWconst || x_1.AuxInt != 8 
|| w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { 12723 break 12724 } 12725 v.reset(OpAMD64MOVWstore) 12726 v.AuxInt = i 12727 v.Aux = s 12728 v.AddArg(p) 12729 v.AddArg(w) 12730 v.AddArg(mem) 12731 return true 12732 } 12733 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem)) 12734 // cond: x.Uses == 1 && clobber(x) 12735 // result: (MOVWstore [i] {s} p w mem) 12736 for { 12737 i := v.AuxInt 12738 s := v.Aux 12739 _ = v.Args[2] 12740 p := v.Args[0] 12741 w := v.Args[1] 12742 x := v.Args[2] 12743 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { 12744 break 12745 } 12746 mem := x.Args[2] 12747 if p != x.Args[0] { 12748 break 12749 } 12750 x_1 := x.Args[1] 12751 if x_1.Op != OpAMD64SHRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { 12752 break 12753 } 12754 v.reset(OpAMD64MOVWstore) 12755 v.AuxInt = i 12756 v.Aux = s 12757 v.AddArg(p) 12758 v.AddArg(w) 12759 v.AddArg(mem) 12760 return true 12761 } 12762 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem)) 12763 // cond: x.Uses == 1 && clobber(x) 12764 // result: (MOVWstore [i] {s} p w mem) 12765 for { 12766 i := v.AuxInt 12767 s := v.Aux 12768 _ = v.Args[2] 12769 p := v.Args[0] 12770 w := v.Args[1] 12771 x := v.Args[2] 12772 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { 12773 break 12774 } 12775 mem := x.Args[2] 12776 if p != x.Args[0] { 12777 break 12778 } 12779 x_1 := x.Args[1] 12780 if x_1.Op != OpAMD64SHRQconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { 12781 break 12782 } 12783 v.reset(OpAMD64MOVWstore) 12784 v.AuxInt = i 12785 v.Aux = s 12786 v.AddArg(p) 12787 v.AddArg(w) 12788 v.AddArg(mem) 12789 return true 12790 } 12791 // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) 12792 // cond: x.Uses == 1 && clobber(x) 12793 // result: (MOVWstore [i-1] {s} p w0 mem) 12794 for { 12795 i := v.AuxInt 12796 s := v.Aux 12797 _ = v.Args[2] 12798 p := v.Args[0] 12799 v_1 := v.Args[1] 12800 if v_1.Op != OpAMD64SHRLconst { 12801 break 12802 } 12803 j := v_1.AuxInt 12804 w := v_1.Args[0] 12805 x := v.Args[2] 12806 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { 12807 break 12808 } 12809 mem := x.Args[2] 12810 if p != x.Args[0] { 12811 break 12812 } 12813 w0 := x.Args[1] 12814 if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { 12815 break 12816 } 12817 v.reset(OpAMD64MOVWstore) 12818 v.AuxInt = i - 1 12819 v.Aux = s 12820 v.AddArg(p) 12821 v.AddArg(w0) 12822 v.AddArg(mem) 12823 return true 12824 } 12825 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 12826 // cond: x.Uses == 1 && clobber(x) 12827 // result: (MOVWstore [i-1] {s} p w0 mem) 12828 for { 12829 i := v.AuxInt 12830 s := v.Aux 12831 _ = v.Args[2] 12832 p := v.Args[0] 12833 v_1 := v.Args[1] 12834 if v_1.Op != OpAMD64SHRQconst { 12835 break 12836 } 12837 j := v_1.AuxInt 12838 w := v_1.Args[0] 12839 x := v.Args[2] 12840 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { 12841 break 12842 } 12843 mem := x.Args[2] 12844 if p != x.Args[0] { 12845 break 12846 } 12847 w0 := x.Args[1] 12848 if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { 12849 break 12850 } 12851 v.reset(OpAMD64MOVWstore) 12852 v.AuxInt = i - 1 12853 v.Aux = s 12854 v.AddArg(p) 12855 v.AddArg(w0) 12856 v.AddArg(mem) 12857 return true 12858 } 12859 
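// The surrounding rules collapse chains of adjacent single-byte stores into
// one wider store. When the bytes of w are written in ascending
// (little-endian) order the result is a plain MOVWstore/MOVLstore/MOVQstore
// of w (or of a residual shifted value w0); when they are written
// high-byte-first, the wide store byte-swaps via ROLWconst [8] for 2 bytes or
// BSWAPL/BSWAPQ for 4/8 bytes. The x.Uses == 1 && clobber(x) conditions
// ensure each inner narrow store has no other use, so it is safe to discard
// once its bytes are covered by the merged store. The next rule plays the
// same game with a load/store pair, fusing two byte-sized copies into a
// single 16-bit copy.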
// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) 12860 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 12861 // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) 12862 for { 12863 i := v.AuxInt 12864 s := v.Aux 12865 _ = v.Args[2] 12866 p := v.Args[0] 12867 x1 := v.Args[1] 12868 if x1.Op != OpAMD64MOVBload { 12869 break 12870 } 12871 j := x1.AuxInt 12872 s2 := x1.Aux 12873 mem := x1.Args[1] 12874 p2 := x1.Args[0] 12875 mem2 := v.Args[2] 12876 if mem2.Op != OpAMD64MOVBstore || mem2.AuxInt != i-1 || mem2.Aux != s { 12877 break 12878 } 12879 _ = mem2.Args[2] 12880 if p != mem2.Args[0] { 12881 break 12882 } 12883 x2 := mem2.Args[1] 12884 if x2.Op != OpAMD64MOVBload || x2.AuxInt != j-1 || x2.Aux != s2 { 12885 break 12886 } 12887 _ = x2.Args[1] 12888 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 12889 break 12890 } 12891 v.reset(OpAMD64MOVWstore) 12892 v.AuxInt = i - 1 12893 v.Aux = s 12894 v.AddArg(p) 12895 v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16) 12896 v0.AuxInt = j - 1 12897 v0.Aux = s2 12898 v0.AddArg(p2) 12899 v0.AddArg(mem) 12900 v.AddArg(v0) 12901 v.AddArg(mem) 12902 return true 12903 } 12904 return false 12905 } 12906 func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { 12907 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 12908 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 12909 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 12910 for { 12911 off1 := v.AuxInt 12912 sym1 := v.Aux 12913 mem := v.Args[2] 12914 v_0 := v.Args[0] 12915 if v_0.Op != OpAMD64LEAL { 12916 break 12917 } 12918 off2 := v_0.AuxInt 12919 sym2 := v_0.Aux 12920 base := v_0.Args[0] 12921 val := v.Args[1] 12922 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 12923 break 12924 } 12925 v.reset(OpAMD64MOVBstore) 12926 v.AuxInt = off1 + off2 12927 v.Aux = mergeSym(sym1, sym2) 12928 v.AddArg(base) 12929 v.AddArg(val) 12930 v.AddArg(mem) 12931 return true 12932 } 12933 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 12934 // cond: is32Bit(off1+off2) 12935 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 12936 for { 12937 off1 := v.AuxInt 12938 sym := v.Aux 12939 mem := v.Args[2] 12940 v_0 := v.Args[0] 12941 if v_0.Op != OpAMD64ADDLconst { 12942 break 12943 } 12944 off2 := v_0.AuxInt 12945 ptr := v_0.Args[0] 12946 val := v.Args[1] 12947 if !(is32Bit(off1 + off2)) { 12948 break 12949 } 12950 v.reset(OpAMD64MOVBstore) 12951 v.AuxInt = off1 + off2 12952 v.Aux = sym 12953 v.AddArg(ptr) 12954 v.AddArg(val) 12955 v.AddArg(mem) 12956 return true 12957 } 12958 return false 12959 } 12960 func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { 12961 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 12962 // cond: ValAndOff(sc).canAdd(off) 12963 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 12964 for { 12965 sc := v.AuxInt 12966 s := v.Aux 12967 mem := v.Args[1] 12968 v_0 := v.Args[0] 12969 if v_0.Op != OpAMD64ADDQconst { 12970 break 12971 } 12972 off := v_0.AuxInt 12973 ptr := v_0.Args[0] 12974 if !(ValAndOff(sc).canAdd(off)) { 12975 break 12976 } 12977 v.reset(OpAMD64MOVBstoreconst) 12978 v.AuxInt = ValAndOff(sc).add(off) 12979 v.Aux = s 12980 v.AddArg(ptr) 12981 v.AddArg(mem) 12982 return true 12983 } 12984 // match: (MOVBstoreconst 
[sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 12985 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 12986 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 12987 for { 12988 sc := v.AuxInt 12989 sym1 := v.Aux 12990 mem := v.Args[1] 12991 v_0 := v.Args[0] 12992 if v_0.Op != OpAMD64LEAQ { 12993 break 12994 } 12995 off := v_0.AuxInt 12996 sym2 := v_0.Aux 12997 ptr := v_0.Args[0] 12998 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 12999 break 13000 } 13001 v.reset(OpAMD64MOVBstoreconst) 13002 v.AuxInt = ValAndOff(sc).add(off) 13003 v.Aux = mergeSym(sym1, sym2) 13004 v.AddArg(ptr) 13005 v.AddArg(mem) 13006 return true 13007 } 13008 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 13009 // cond: canMergeSym(sym1, sym2) 13010 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13011 for { 13012 x := v.AuxInt 13013 sym1 := v.Aux 13014 mem := v.Args[1] 13015 v_0 := v.Args[0] 13016 if v_0.Op != OpAMD64LEAQ1 { 13017 break 13018 } 13019 off := v_0.AuxInt 13020 sym2 := v_0.Aux 13021 idx := v_0.Args[1] 13022 ptr := v_0.Args[0] 13023 if !(canMergeSym(sym1, sym2)) { 13024 break 13025 } 13026 v.reset(OpAMD64MOVBstoreconstidx1) 13027 v.AuxInt = ValAndOff(x).add(off) 13028 v.Aux = mergeSym(sym1, sym2) 13029 v.AddArg(ptr) 13030 v.AddArg(idx) 13031 v.AddArg(mem) 13032 return true 13033 } 13034 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 13035 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 13036 for { 13037 x := v.AuxInt 13038 sym := v.Aux 13039 mem := v.Args[1] 13040 v_0 := v.Args[0] 13041 if v_0.Op != OpAMD64ADDQ { 13042 break 13043 } 13044 idx := v_0.Args[1] 13045 ptr := v_0.Args[0] 13046 v.reset(OpAMD64MOVBstoreconstidx1) 13047 v.AuxInt = x 13048 v.Aux = sym 13049 v.AddArg(ptr) 13050 v.AddArg(idx) 13051 v.AddArg(mem) 13052 return true 13053 } 13054 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 13055 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 13056 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 13057 for { 13058 c := v.AuxInt 13059 s := v.Aux 13060 _ = v.Args[1] 13061 p := v.Args[0] 13062 x := v.Args[1] 13063 if x.Op != OpAMD64MOVBstoreconst { 13064 break 13065 } 13066 a := x.AuxInt 13067 if x.Aux != s { 13068 break 13069 } 13070 mem := x.Args[1] 13071 if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 13072 break 13073 } 13074 v.reset(OpAMD64MOVWstoreconst) 13075 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 13076 v.Aux = s 13077 v.AddArg(p) 13078 v.AddArg(mem) 13079 return true 13080 } 13081 // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) 13082 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 13083 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 13084 for { 13085 a := v.AuxInt 13086 s := v.Aux 13087 _ = v.Args[1] 13088 p := v.Args[0] 13089 x := v.Args[1] 13090 if x.Op != OpAMD64MOVBstoreconst { 13091 break 13092 } 13093 c := x.AuxInt 13094 if x.Aux != s { 13095 break 13096 } 13097 mem := x.Args[1] 13098 if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 13099 break 13100 } 13101 v.reset(OpAMD64MOVWstoreconst) 13102 v.AuxInt = 
makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 13103 v.Aux = s 13104 v.AddArg(p) 13105 v.AddArg(mem) 13106 return true 13107 } 13108 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 13109 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13110 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13111 for { 13112 sc := v.AuxInt 13113 sym1 := v.Aux 13114 mem := v.Args[1] 13115 v_0 := v.Args[0] 13116 if v_0.Op != OpAMD64LEAL { 13117 break 13118 } 13119 off := v_0.AuxInt 13120 sym2 := v_0.Aux 13121 ptr := v_0.Args[0] 13122 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13123 break 13124 } 13125 v.reset(OpAMD64MOVBstoreconst) 13126 v.AuxInt = ValAndOff(sc).add(off) 13127 v.Aux = mergeSym(sym1, sym2) 13128 v.AddArg(ptr) 13129 v.AddArg(mem) 13130 return true 13131 } 13132 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 13133 // cond: ValAndOff(sc).canAdd(off) 13134 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 13135 for { 13136 sc := v.AuxInt 13137 s := v.Aux 13138 mem := v.Args[1] 13139 v_0 := v.Args[0] 13140 if v_0.Op != OpAMD64ADDLconst { 13141 break 13142 } 13143 off := v_0.AuxInt 13144 ptr := v_0.Args[0] 13145 if !(ValAndOff(sc).canAdd(off)) { 13146 break 13147 } 13148 v.reset(OpAMD64MOVBstoreconst) 13149 v.AuxInt = ValAndOff(sc).add(off) 13150 v.Aux = s 13151 v.AddArg(ptr) 13152 v.AddArg(mem) 13153 return true 13154 } 13155 return false 13156 } 13157 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { 13158 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 13159 // cond: ValAndOff(x).canAdd(c) 13160 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13161 for { 13162 x := v.AuxInt 13163 sym := v.Aux 13164 mem := v.Args[2] 13165 v_0 := v.Args[0] 13166 if v_0.Op != OpAMD64ADDQconst { 13167 break 13168 } 13169 c := v_0.AuxInt 13170 ptr := v_0.Args[0] 13171 idx := v.Args[1] 13172 if !(ValAndOff(x).canAdd(c)) { 13173 break 13174 } 13175 v.reset(OpAMD64MOVBstoreconstidx1) 13176 v.AuxInt = ValAndOff(x).add(c) 13177 v.Aux = sym 13178 v.AddArg(ptr) 13179 v.AddArg(idx) 13180 v.AddArg(mem) 13181 return true 13182 } 13183 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 13184 // cond: ValAndOff(x).canAdd(c) 13185 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13186 for { 13187 x := v.AuxInt 13188 sym := v.Aux 13189 mem := v.Args[2] 13190 ptr := v.Args[0] 13191 v_1 := v.Args[1] 13192 if v_1.Op != OpAMD64ADDQconst { 13193 break 13194 } 13195 c := v_1.AuxInt 13196 idx := v_1.Args[0] 13197 if !(ValAndOff(x).canAdd(c)) { 13198 break 13199 } 13200 v.reset(OpAMD64MOVBstoreconstidx1) 13201 v.AuxInt = ValAndOff(x).add(c) 13202 v.Aux = sym 13203 v.AddArg(ptr) 13204 v.AddArg(idx) 13205 v.AddArg(mem) 13206 return true 13207 } 13208 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 13209 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 13210 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 13211 for { 13212 c := v.AuxInt 13213 s := v.Aux 13214 _ = v.Args[2] 13215 p := v.Args[0] 13216 i := v.Args[1] 13217 x := v.Args[2] 13218 if x.Op != OpAMD64MOVBstoreconstidx1 { 13219 break 13220 } 13221 a := x.AuxInt 13222 if x.Aux != s { 13223 break 13224 } 13225 mem := x.Args[2] 13226 if p != x.Args[0] || i != x.Args[1] || !(x.Uses == 1 && 
ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 13227 break 13228 } 13229 v.reset(OpAMD64MOVWstoreconstidx1) 13230 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 13231 v.Aux = s 13232 v.AddArg(p) 13233 v.AddArg(i) 13234 v.AddArg(mem) 13235 return true 13236 } 13237 return false 13238 } 13239 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { 13240 b := v.Block 13241 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 13242 // cond: is32Bit(c+d) 13243 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 13244 for { 13245 c := v.AuxInt 13246 sym := v.Aux 13247 mem := v.Args[3] 13248 v_0 := v.Args[0] 13249 if v_0.Op != OpAMD64ADDQconst { 13250 break 13251 } 13252 d := v_0.AuxInt 13253 ptr := v_0.Args[0] 13254 idx := v.Args[1] 13255 val := v.Args[2] 13256 if !(is32Bit(c + d)) { 13257 break 13258 } 13259 v.reset(OpAMD64MOVBstoreidx1) 13260 v.AuxInt = c + d 13261 v.Aux = sym 13262 v.AddArg(ptr) 13263 v.AddArg(idx) 13264 v.AddArg(val) 13265 v.AddArg(mem) 13266 return true 13267 } 13268 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 13269 // cond: is32Bit(c+d) 13270 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 13271 for { 13272 c := v.AuxInt 13273 sym := v.Aux 13274 mem := v.Args[3] 13275 ptr := v.Args[0] 13276 v_1 := v.Args[1] 13277 if v_1.Op != OpAMD64ADDQconst { 13278 break 13279 } 13280 d := v_1.AuxInt 13281 idx := v_1.Args[0] 13282 val := v.Args[2] 13283 if !(is32Bit(c + d)) { 13284 break 13285 } 13286 v.reset(OpAMD64MOVBstoreidx1) 13287 v.AuxInt = c + d 13288 v.Aux = sym 13289 v.AddArg(ptr) 13290 v.AddArg(idx) 13291 v.AddArg(val) 13292 v.AddArg(mem) 13293 return true 13294 } 13295 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) 13296 // cond: x0.Uses == 1 && clobber(x0) 13297 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem) 13298 for { 13299 i := v.AuxInt 13300 s := v.Aux 13301 _ = v.Args[3] 13302 p := v.Args[0] 13303 idx := v.Args[1] 13304 w := v.Args[2] 13305 x0 := v.Args[3] 13306 if x0.Op != OpAMD64MOVBstoreidx1 || x0.AuxInt != i-1 || x0.Aux != s { 13307 break 13308 } 13309 mem := x0.Args[3] 13310 if p != x0.Args[0] || idx != x0.Args[1] { 13311 break 13312 } 13313 x0_2 := x0.Args[2] 13314 if x0_2.Op != OpAMD64SHRWconst || x0_2.AuxInt != 8 || w != x0_2.Args[0] || !(x0.Uses == 1 && clobber(x0)) { 13315 break 13316 } 13317 v.reset(OpAMD64MOVWstoreidx1) 13318 v.AuxInt = i - 1 13319 v.Aux = s 13320 v.AddArg(p) 13321 v.AddArg(idx) 13322 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 13323 v0.AuxInt = 8 13324 v0.AddArg(w) 13325 v.AddArg(v0) 13326 v.AddArg(mem) 13327 return true 13328 } 13329 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) 13330 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 13331 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem) 13332 for { 13333 i := v.AuxInt 13334 s := v.Aux 13335 _ = v.Args[3] 13336 p := v.Args[0] 13337 idx := v.Args[1] 13338 w := v.Args[2] 13339 x2 := v.Args[3] 13340 if x2.Op != OpAMD64MOVBstoreidx1 || x2.AuxInt != i-1 || x2.Aux != s { 13341 break 13342 } 13343 _ = x2.Args[3] 13344 if p != x2.Args[0] || idx != x2.Args[1] { 13345 break 13346 } 13347 x2_2 := x2.Args[2] 13348 if x2_2.Op != OpAMD64SHRLconst || x2_2.AuxInt != 8 || w != 
x2_2.Args[0] { 13349 break 13350 } 13351 x1 := x2.Args[3] 13352 if x1.Op != OpAMD64MOVBstoreidx1 || x1.AuxInt != i-2 || x1.Aux != s { 13353 break 13354 } 13355 _ = x1.Args[3] 13356 if p != x1.Args[0] || idx != x1.Args[1] { 13357 break 13358 } 13359 x1_2 := x1.Args[2] 13360 if x1_2.Op != OpAMD64SHRLconst || x1_2.AuxInt != 16 || w != x1_2.Args[0] { 13361 break 13362 } 13363 x0 := x1.Args[3] 13364 if x0.Op != OpAMD64MOVBstoreidx1 || x0.AuxInt != i-3 || x0.Aux != s { 13365 break 13366 } 13367 mem := x0.Args[3] 13368 if p != x0.Args[0] || idx != x0.Args[1] { 13369 break 13370 } 13371 x0_2 := x0.Args[2] 13372 if x0_2.Op != OpAMD64SHRLconst || x0_2.AuxInt != 24 || w != x0_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 13373 break 13374 } 13375 v.reset(OpAMD64MOVLstoreidx1) 13376 v.AuxInt = i - 3 13377 v.Aux = s 13378 v.AddArg(p) 13379 v.AddArg(idx) 13380 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 13381 v0.AddArg(w) 13382 v.AddArg(v0) 13383 v.AddArg(mem) 13384 return true 13385 } 13386 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) 13387 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 13388 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem) 13389 for { 13390 i := v.AuxInt 13391 s := v.Aux 13392 _ = v.Args[3] 13393 p := v.Args[0] 13394 idx := v.Args[1] 13395 w := v.Args[2] 13396 x6 := v.Args[3] 13397 if x6.Op != OpAMD64MOVBstoreidx1 || x6.AuxInt != i-1 || x6.Aux != s { 13398 break 13399 } 13400 _ = x6.Args[3] 13401 if p != x6.Args[0] || idx != x6.Args[1] { 13402 break 13403 } 13404 x6_2 := x6.Args[2] 13405 if x6_2.Op != OpAMD64SHRQconst || x6_2.AuxInt != 8 || w != x6_2.Args[0] { 13406 break 13407 } 13408 x5 := x6.Args[3] 13409 if x5.Op != OpAMD64MOVBstoreidx1 || x5.AuxInt != i-2 || x5.Aux != s { 13410 break 13411 } 13412 _ = x5.Args[3] 13413 if p != x5.Args[0] || idx != x5.Args[1] { 13414 break 13415 } 13416 x5_2 := x5.Args[2] 13417 if x5_2.Op != OpAMD64SHRQconst || x5_2.AuxInt != 16 || w != x5_2.Args[0] { 13418 break 13419 } 13420 x4 := x5.Args[3] 13421 if x4.Op != OpAMD64MOVBstoreidx1 || x4.AuxInt != i-3 || x4.Aux != s { 13422 break 13423 } 13424 _ = x4.Args[3] 13425 if p != x4.Args[0] || idx != x4.Args[1] { 13426 break 13427 } 13428 x4_2 := x4.Args[2] 13429 if x4_2.Op != OpAMD64SHRQconst || x4_2.AuxInt != 24 || w != x4_2.Args[0] { 13430 break 13431 } 13432 x3 := x4.Args[3] 13433 if x3.Op != OpAMD64MOVBstoreidx1 || x3.AuxInt != i-4 || x3.Aux != s { 13434 break 13435 } 13436 _ = x3.Args[3] 13437 if p != x3.Args[0] || idx != x3.Args[1] { 13438 break 13439 } 13440 x3_2 := x3.Args[2] 13441 if x3_2.Op != OpAMD64SHRQconst || x3_2.AuxInt != 32 || w != x3_2.Args[0] { 13442 break 13443 } 13444 x2 := x3.Args[3] 13445 if x2.Op != OpAMD64MOVBstoreidx1 || x2.AuxInt != i-5 || x2.Aux != s { 13446 break 13447 } 13448 _ = x2.Args[3] 13449 if p != x2.Args[0] || idx != x2.Args[1] { 13450 break 13451 } 13452 x2_2 := x2.Args[2] 13453 if x2_2.Op != OpAMD64SHRQconst || x2_2.AuxInt != 40 || w != 
x2_2.Args[0] { 13454 break 13455 } 13456 x1 := x2.Args[3] 13457 if x1.Op != OpAMD64MOVBstoreidx1 || x1.AuxInt != i-6 || x1.Aux != s { 13458 break 13459 } 13460 _ = x1.Args[3] 13461 if p != x1.Args[0] || idx != x1.Args[1] { 13462 break 13463 } 13464 x1_2 := x1.Args[2] 13465 if x1_2.Op != OpAMD64SHRQconst || x1_2.AuxInt != 48 || w != x1_2.Args[0] { 13466 break 13467 } 13468 x0 := x1.Args[3] 13469 if x0.Op != OpAMD64MOVBstoreidx1 || x0.AuxInt != i-7 || x0.Aux != s { 13470 break 13471 } 13472 mem := x0.Args[3] 13473 if p != x0.Args[0] || idx != x0.Args[1] { 13474 break 13475 } 13476 x0_2 := x0.Args[2] 13477 if x0_2.Op != OpAMD64SHRQconst || x0_2.AuxInt != 56 || w != x0_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 13478 break 13479 } 13480 v.reset(OpAMD64MOVQstoreidx1) 13481 v.AuxInt = i - 7 13482 v.Aux = s 13483 v.AddArg(p) 13484 v.AddArg(idx) 13485 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 13486 v0.AddArg(w) 13487 v.AddArg(v0) 13488 v.AddArg(mem) 13489 return true 13490 } 13491 // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 13492 // cond: x.Uses == 1 && clobber(x) 13493 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 13494 for { 13495 i := v.AuxInt 13496 s := v.Aux 13497 _ = v.Args[3] 13498 p := v.Args[0] 13499 idx := v.Args[1] 13500 v_2 := v.Args[2] 13501 if v_2.Op != OpAMD64SHRWconst || v_2.AuxInt != 8 { 13502 break 13503 } 13504 w := v_2.Args[0] 13505 x := v.Args[3] 13506 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { 13507 break 13508 } 13509 mem := x.Args[3] 13510 if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { 13511 break 13512 } 13513 v.reset(OpAMD64MOVWstoreidx1) 13514 v.AuxInt = i - 1 13515 v.Aux = s 13516 v.AddArg(p) 13517 v.AddArg(idx) 13518 v.AddArg(w) 13519 v.AddArg(mem) 13520 return true 13521 } 13522 // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 13523 // cond: x.Uses == 1 && clobber(x) 13524 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 13525 for { 13526 i := v.AuxInt 13527 s := v.Aux 13528 _ = v.Args[3] 13529 p := v.Args[0] 13530 idx := v.Args[1] 13531 v_2 := v.Args[2] 13532 if v_2.Op != OpAMD64SHRLconst || v_2.AuxInt != 8 { 13533 break 13534 } 13535 w := v_2.Args[0] 13536 x := v.Args[3] 13537 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { 13538 break 13539 } 13540 mem := x.Args[3] 13541 if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) { 13542 break 13543 } 13544 v.reset(OpAMD64MOVWstoreidx1) 13545 v.AuxInt = i - 1 13546 v.Aux = s 13547 v.AddArg(p) 13548 v.AddArg(idx) 13549 v.AddArg(w) 13550 v.AddArg(mem) 13551 return true 13552 } 13553 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 13554 // cond: x.Uses == 1 && clobber(x) 13555 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 13556 for { 13557 i := v.AuxInt 13558 s := v.Aux 13559 _ = v.Args[3] 13560 p := v.Args[0] 13561 idx := v.Args[1] 13562 v_2 := v.Args[2] 13563 if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 8 { 13564 break 13565 } 13566 w := v_2.Args[0] 13567 x := v.Args[3] 13568 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { 13569 break 13570 } 13571 mem := x.Args[3] 13572 if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || 
!(x.Uses == 1 && clobber(x)) { 13573 break 13574 } 13575 v.reset(OpAMD64MOVWstoreidx1) 13576 v.AuxInt = i - 1 13577 v.Aux = s 13578 v.AddArg(p) 13579 v.AddArg(idx) 13580 v.AddArg(w) 13581 v.AddArg(mem) 13582 return true 13583 } 13584 // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) 13585 // cond: x.Uses == 1 && clobber(x) 13586 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 13587 for { 13588 i := v.AuxInt 13589 s := v.Aux 13590 _ = v.Args[3] 13591 p := v.Args[0] 13592 idx := v.Args[1] 13593 v_2 := v.Args[2] 13594 if v_2.Op != OpAMD64SHRLconst { 13595 break 13596 } 13597 j := v_2.AuxInt 13598 w := v_2.Args[0] 13599 x := v.Args[3] 13600 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { 13601 break 13602 } 13603 mem := x.Args[3] 13604 if p != x.Args[0] || idx != x.Args[1] { 13605 break 13606 } 13607 w0 := x.Args[2] 13608 if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { 13609 break 13610 } 13611 v.reset(OpAMD64MOVWstoreidx1) 13612 v.AuxInt = i - 1 13613 v.Aux = s 13614 v.AddArg(p) 13615 v.AddArg(idx) 13616 v.AddArg(w0) 13617 v.AddArg(mem) 13618 return true 13619 } 13620 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 13621 // cond: x.Uses == 1 && clobber(x) 13622 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 13623 for { 13624 i := v.AuxInt 13625 s := v.Aux 13626 _ = v.Args[3] 13627 p := v.Args[0] 13628 idx := v.Args[1] 13629 v_2 := v.Args[2] 13630 if v_2.Op != OpAMD64SHRQconst { 13631 break 13632 } 13633 j := v_2.AuxInt 13634 w := v_2.Args[0] 13635 x := v.Args[3] 13636 if x.Op != OpAMD64MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s { 13637 break 13638 } 13639 mem := x.Args[3] 13640 if p != x.Args[0] || idx != x.Args[1] { 13641 break 13642 } 13643 w0 := x.Args[2] 13644 if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { 13645 break 13646 } 13647 v.reset(OpAMD64MOVWstoreidx1) 13648 v.AuxInt = i - 1 13649 v.Aux = s 13650 v.AddArg(p) 13651 v.AddArg(idx) 13652 v.AddArg(w0) 13653 v.AddArg(mem) 13654 return true 13655 } 13656 return false 13657 } 13658 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v *Value) bool { 13659 // match: (MOVBstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 13660 // cond: is32Bit(i+c) 13661 // result: (MOVBstore [i+c] {s} p w mem) 13662 for { 13663 i := v.AuxInt 13664 s := v.Aux 13665 mem := v.Args[3] 13666 p := v.Args[0] 13667 v_1 := v.Args[1] 13668 if v_1.Op != OpAMD64MOVQconst { 13669 break 13670 } 13671 c := v_1.AuxInt 13672 w := v.Args[2] 13673 if !(is32Bit(i + c)) { 13674 break 13675 } 13676 v.reset(OpAMD64MOVBstore) 13677 v.AuxInt = i + c 13678 v.Aux = s 13679 v.AddArg(p) 13680 v.AddArg(w) 13681 v.AddArg(mem) 13682 return true 13683 } 13684 return false 13685 } 13686 func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { 13687 b := v.Block 13688 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 13689 // cond: x.Uses == 1 && clobber(x) 13690 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 13691 for { 13692 x := v.Args[0] 13693 if x.Op != OpAMD64MOVLload { 13694 break 13695 } 13696 off := x.AuxInt 13697 sym := x.Aux 13698 mem := x.Args[1] 13699 ptr := x.Args[0] 13700 if !(x.Uses == 1 && clobber(x)) { 13701 break 13702 } 13703 b = x.Block 13704 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) 13705 v.reset(OpCopy) 13706 v.AddArg(v0) 13707 v0.AuxInt = off 13708 v0.Aux = sym 13709 
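// Note the @x.Block form of this rewrite: the replacement MOVLQSXload is
// built in the block of the original load (b = x.Block) and v is reset to an
// OpCopy of it, so the sign extension is folded into the load even when the
// extension lives in a different block than the load it consumes.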
v0.AddArg(ptr) 13710 v0.AddArg(mem) 13711 return true 13712 } 13713 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 13714 // cond: x.Uses == 1 && clobber(x) 13715 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 13716 for { 13717 x := v.Args[0] 13718 if x.Op != OpAMD64MOVQload { 13719 break 13720 } 13721 off := x.AuxInt 13722 sym := x.Aux 13723 mem := x.Args[1] 13724 ptr := x.Args[0] 13725 if !(x.Uses == 1 && clobber(x)) { 13726 break 13727 } 13728 b = x.Block 13729 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) 13730 v.reset(OpCopy) 13731 v.AddArg(v0) 13732 v0.AuxInt = off 13733 v0.Aux = sym 13734 v0.AddArg(ptr) 13735 v0.AddArg(mem) 13736 return true 13737 } 13738 // match: (MOVLQSX (ANDLconst [c] x)) 13739 // cond: c & 0x80000000 == 0 13740 // result: (ANDLconst [c & 0x7fffffff] x) 13741 for { 13742 v_0 := v.Args[0] 13743 if v_0.Op != OpAMD64ANDLconst { 13744 break 13745 } 13746 c := v_0.AuxInt 13747 x := v_0.Args[0] 13748 if !(c&0x80000000 == 0) { 13749 break 13750 } 13751 v.reset(OpAMD64ANDLconst) 13752 v.AuxInt = c & 0x7fffffff 13753 v.AddArg(x) 13754 return true 13755 } 13756 // match: (MOVLQSX (MOVLQSX x)) 13757 // result: (MOVLQSX x) 13758 for { 13759 v_0 := v.Args[0] 13760 if v_0.Op != OpAMD64MOVLQSX { 13761 break 13762 } 13763 x := v_0.Args[0] 13764 v.reset(OpAMD64MOVLQSX) 13765 v.AddArg(x) 13766 return true 13767 } 13768 // match: (MOVLQSX (MOVWQSX x)) 13769 // result: (MOVWQSX x) 13770 for { 13771 v_0 := v.Args[0] 13772 if v_0.Op != OpAMD64MOVWQSX { 13773 break 13774 } 13775 x := v_0.Args[0] 13776 v.reset(OpAMD64MOVWQSX) 13777 v.AddArg(x) 13778 return true 13779 } 13780 // match: (MOVLQSX (MOVBQSX x)) 13781 // result: (MOVBQSX x) 13782 for { 13783 v_0 := v.Args[0] 13784 if v_0.Op != OpAMD64MOVBQSX { 13785 break 13786 } 13787 x := v_0.Args[0] 13788 v.reset(OpAMD64MOVBQSX) 13789 v.AddArg(x) 13790 return true 13791 } 13792 return false 13793 } 13794 func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { 13795 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 13796 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 13797 // result: (MOVLQSX x) 13798 for { 13799 off := v.AuxInt 13800 sym := v.Aux 13801 _ = v.Args[1] 13802 ptr := v.Args[0] 13803 v_1 := v.Args[1] 13804 if v_1.Op != OpAMD64MOVLstore { 13805 break 13806 } 13807 off2 := v_1.AuxInt 13808 sym2 := v_1.Aux 13809 _ = v_1.Args[2] 13810 ptr2 := v_1.Args[0] 13811 x := v_1.Args[1] 13812 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 13813 break 13814 } 13815 v.reset(OpAMD64MOVLQSX) 13816 v.AddArg(x) 13817 return true 13818 } 13819 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 13820 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13821 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 13822 for { 13823 off1 := v.AuxInt 13824 sym1 := v.Aux 13825 mem := v.Args[1] 13826 v_0 := v.Args[0] 13827 if v_0.Op != OpAMD64LEAQ { 13828 break 13829 } 13830 off2 := v_0.AuxInt 13831 sym2 := v_0.Aux 13832 base := v_0.Args[0] 13833 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13834 break 13835 } 13836 v.reset(OpAMD64MOVLQSXload) 13837 v.AuxInt = off1 + off2 13838 v.Aux = mergeSym(sym1, sym2) 13839 v.AddArg(base) 13840 v.AddArg(mem) 13841 return true 13842 } 13843 return false 13844 } 13845 func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { 13846 b := v.Block 13847 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 13848 // cond: x.Uses == 1 && clobber(x) 13849 // result: @x.Block (MOVLload <v.Type> [off] {sym} 
ptr mem) 13850 for { 13851 x := v.Args[0] 13852 if x.Op != OpAMD64MOVLload { 13853 break 13854 } 13855 off := x.AuxInt 13856 sym := x.Aux 13857 mem := x.Args[1] 13858 ptr := x.Args[0] 13859 if !(x.Uses == 1 && clobber(x)) { 13860 break 13861 } 13862 b = x.Block 13863 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) 13864 v.reset(OpCopy) 13865 v.AddArg(v0) 13866 v0.AuxInt = off 13867 v0.Aux = sym 13868 v0.AddArg(ptr) 13869 v0.AddArg(mem) 13870 return true 13871 } 13872 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 13873 // cond: x.Uses == 1 && clobber(x) 13874 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 13875 for { 13876 x := v.Args[0] 13877 if x.Op != OpAMD64MOVQload { 13878 break 13879 } 13880 off := x.AuxInt 13881 sym := x.Aux 13882 mem := x.Args[1] 13883 ptr := x.Args[0] 13884 if !(x.Uses == 1 && clobber(x)) { 13885 break 13886 } 13887 b = x.Block 13888 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) 13889 v.reset(OpCopy) 13890 v.AddArg(v0) 13891 v0.AuxInt = off 13892 v0.Aux = sym 13893 v0.AddArg(ptr) 13894 v0.AddArg(mem) 13895 return true 13896 } 13897 // match: (MOVLQZX x) 13898 // cond: zeroUpper32Bits(x,3) 13899 // result: x 13900 for { 13901 x := v.Args[0] 13902 if !(zeroUpper32Bits(x, 3)) { 13903 break 13904 } 13905 v.reset(OpCopy) 13906 v.Type = x.Type 13907 v.AddArg(x) 13908 return true 13909 } 13910 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 13911 // cond: x.Uses == 1 && clobber(x) 13912 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 13913 for { 13914 x := v.Args[0] 13915 if x.Op != OpAMD64MOVLloadidx1 { 13916 break 13917 } 13918 off := x.AuxInt 13919 sym := x.Aux 13920 mem := x.Args[2] 13921 ptr := x.Args[0] 13922 idx := x.Args[1] 13923 if !(x.Uses == 1 && clobber(x)) { 13924 break 13925 } 13926 b = x.Block 13927 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 13928 v.reset(OpCopy) 13929 v.AddArg(v0) 13930 v0.AuxInt = off 13931 v0.Aux = sym 13932 v0.AddArg(ptr) 13933 v0.AddArg(idx) 13934 v0.AddArg(mem) 13935 return true 13936 } 13937 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 13938 // cond: x.Uses == 1 && clobber(x) 13939 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 13940 for { 13941 x := v.Args[0] 13942 if x.Op != OpAMD64MOVLloadidx4 { 13943 break 13944 } 13945 off := x.AuxInt 13946 sym := x.Aux 13947 mem := x.Args[2] 13948 ptr := x.Args[0] 13949 idx := x.Args[1] 13950 if !(x.Uses == 1 && clobber(x)) { 13951 break 13952 } 13953 b = x.Block 13954 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) 13955 v.reset(OpCopy) 13956 v.AddArg(v0) 13957 v0.AuxInt = off 13958 v0.Aux = sym 13959 v0.AddArg(ptr) 13960 v0.AddArg(idx) 13961 v0.AddArg(mem) 13962 return true 13963 } 13964 // match: (MOVLQZX (ANDLconst [c] x)) 13965 // result: (ANDLconst [c] x) 13966 for { 13967 v_0 := v.Args[0] 13968 if v_0.Op != OpAMD64ANDLconst { 13969 break 13970 } 13971 c := v_0.AuxInt 13972 x := v_0.Args[0] 13973 v.reset(OpAMD64ANDLconst) 13974 v.AuxInt = c 13975 v.AddArg(x) 13976 return true 13977 } 13978 // match: (MOVLQZX (MOVLQZX x)) 13979 // result: (MOVLQZX x) 13980 for { 13981 v_0 := v.Args[0] 13982 if v_0.Op != OpAMD64MOVLQZX { 13983 break 13984 } 13985 x := v_0.Args[0] 13986 v.reset(OpAMD64MOVLQZX) 13987 v.AddArg(x) 13988 return true 13989 } 13990 // match: (MOVLQZX (MOVWQZX x)) 13991 // result: (MOVWQZX x) 13992 for { 13993 v_0 := v.Args[0] 13994 if v_0.Op != OpAMD64MOVWQZX { 13995 break 13996 } 13997 x := v_0.Args[0] 13998 v.reset(OpAMD64MOVWQZX) 13999 v.AddArg(x) 14000 return true 
14001 } 14002 // match: (MOVLQZX (MOVBQZX x)) 14003 // result: (MOVBQZX x) 14004 for { 14005 v_0 := v.Args[0] 14006 if v_0.Op != OpAMD64MOVBQZX { 14007 break 14008 } 14009 x := v_0.Args[0] 14010 v.reset(OpAMD64MOVBQZX) 14011 v.AddArg(x) 14012 return true 14013 } 14014 return false 14015 } 14016 func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool { 14017 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 14018 // cond: is32Bit(off1+off2) 14019 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 14020 for { 14021 off1 := v.AuxInt 14022 sym := v.Aux 14023 mem := v.Args[1] 14024 v_0 := v.Args[0] 14025 if v_0.Op != OpAMD64ADDQconst { 14026 break 14027 } 14028 off2 := v_0.AuxInt 14029 ptr := v_0.Args[0] 14030 if !(is32Bit(off1 + off2)) { 14031 break 14032 } 14033 v.reset(OpAMD64MOVLatomicload) 14034 v.AuxInt = off1 + off2 14035 v.Aux = sym 14036 v.AddArg(ptr) 14037 v.AddArg(mem) 14038 return true 14039 } 14040 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 14041 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 14042 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 14043 for { 14044 off1 := v.AuxInt 14045 sym1 := v.Aux 14046 mem := v.Args[1] 14047 v_0 := v.Args[0] 14048 if v_0.Op != OpAMD64LEAQ { 14049 break 14050 } 14051 off2 := v_0.AuxInt 14052 sym2 := v_0.Aux 14053 ptr := v_0.Args[0] 14054 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 14055 break 14056 } 14057 v.reset(OpAMD64MOVLatomicload) 14058 v.AuxInt = off1 + off2 14059 v.Aux = mergeSym(sym1, sym2) 14060 v.AddArg(ptr) 14061 v.AddArg(mem) 14062 return true 14063 } 14064 return false 14065 } 14066 func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool { 14067 b := v.Block 14068 // match: (MOVLf2i <t> (Arg <u> [off] {sym})) 14069 // cond: t.Size() == u.Size() 14070 // result: @b.Func.Entry (Arg <t> [off] {sym}) 14071 for { 14072 t := v.Type 14073 v_0 := v.Args[0] 14074 if v_0.Op != OpArg { 14075 break 14076 } 14077 u := v_0.Type 14078 off := v_0.AuxInt 14079 sym := v_0.Aux 14080 if !(t.Size() == u.Size()) { 14081 break 14082 } 14083 b = b.Func.Entry 14084 v0 := b.NewValue0(v.Pos, OpArg, t) 14085 v.reset(OpCopy) 14086 v.AddArg(v0) 14087 v0.AuxInt = off 14088 v0.Aux = sym 14089 return true 14090 } 14091 return false 14092 } 14093 func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool { 14094 b := v.Block 14095 // match: (MOVLi2f <t> (Arg <u> [off] {sym})) 14096 // cond: t.Size() == u.Size() 14097 // result: @b.Func.Entry (Arg <t> [off] {sym}) 14098 for { 14099 t := v.Type 14100 v_0 := v.Args[0] 14101 if v_0.Op != OpArg { 14102 break 14103 } 14104 u := v_0.Type 14105 off := v_0.AuxInt 14106 sym := v_0.Aux 14107 if !(t.Size() == u.Size()) { 14108 break 14109 } 14110 b = b.Func.Entry 14111 v0 := b.NewValue0(v.Pos, OpArg, t) 14112 v.reset(OpCopy) 14113 v.AddArg(v0) 14114 v0.AuxInt = off 14115 v0.Aux = sym 14116 return true 14117 } 14118 return false 14119 } 14120 func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { 14121 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 14122 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 14123 // result: (MOVLQZX x) 14124 for { 14125 off := v.AuxInt 14126 sym := v.Aux 14127 _ = v.Args[1] 14128 ptr := v.Args[0] 14129 v_1 := v.Args[1] 14130 if v_1.Op != OpAMD64MOVLstore { 14131 break 14132 } 14133 off2 := v_1.AuxInt 14134 sym2 := v_1.Aux 14135 _ = v_1.Args[2] 14136 ptr2 := v_1.Args[0] 14137 x := v_1.Args[1] 14138 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 14139 break 14140 } 
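// Store-to-load forwarding: when the load reads exactly the address that the
// MOVLstore providing its memory wrote (same symbol, offset, and pointer),
// the loaded value is just the stored value, so the load is replaced with a
// zero extension of x, matching MOVLload's zero-extending semantics.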
14141 v.reset(OpAMD64MOVLQZX) 14142 v.AddArg(x) 14143 return true 14144 } 14145 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 14146 // cond: is32Bit(off1+off2) 14147 // result: (MOVLload [off1+off2] {sym} ptr mem) 14148 for { 14149 off1 := v.AuxInt 14150 sym := v.Aux 14151 mem := v.Args[1] 14152 v_0 := v.Args[0] 14153 if v_0.Op != OpAMD64ADDQconst { 14154 break 14155 } 14156 off2 := v_0.AuxInt 14157 ptr := v_0.Args[0] 14158 if !(is32Bit(off1 + off2)) { 14159 break 14160 } 14161 v.reset(OpAMD64MOVLload) 14162 v.AuxInt = off1 + off2 14163 v.Aux = sym 14164 v.AddArg(ptr) 14165 v.AddArg(mem) 14166 return true 14167 } 14168 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 14169 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 14170 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 14171 for { 14172 off1 := v.AuxInt 14173 sym1 := v.Aux 14174 mem := v.Args[1] 14175 v_0 := v.Args[0] 14176 if v_0.Op != OpAMD64LEAQ { 14177 break 14178 } 14179 off2 := v_0.AuxInt 14180 sym2 := v_0.Aux 14181 base := v_0.Args[0] 14182 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 14183 break 14184 } 14185 v.reset(OpAMD64MOVLload) 14186 v.AuxInt = off1 + off2 14187 v.Aux = mergeSym(sym1, sym2) 14188 v.AddArg(base) 14189 v.AddArg(mem) 14190 return true 14191 } 14192 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 14193 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 14194 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 14195 for { 14196 off1 := v.AuxInt 14197 sym1 := v.Aux 14198 mem := v.Args[1] 14199 v_0 := v.Args[0] 14200 if v_0.Op != OpAMD64LEAQ1 { 14201 break 14202 } 14203 off2 := v_0.AuxInt 14204 sym2 := v_0.Aux 14205 idx := v_0.Args[1] 14206 ptr := v_0.Args[0] 14207 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 14208 break 14209 } 14210 v.reset(OpAMD64MOVLloadidx1) 14211 v.AuxInt = off1 + off2 14212 v.Aux = mergeSym(sym1, sym2) 14213 v.AddArg(ptr) 14214 v.AddArg(idx) 14215 v.AddArg(mem) 14216 return true 14217 } 14218 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 14219 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 14220 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 14221 for { 14222 off1 := v.AuxInt 14223 sym1 := v.Aux 14224 mem := v.Args[1] 14225 v_0 := v.Args[0] 14226 if v_0.Op != OpAMD64LEAQ4 { 14227 break 14228 } 14229 off2 := v_0.AuxInt 14230 sym2 := v_0.Aux 14231 idx := v_0.Args[1] 14232 ptr := v_0.Args[0] 14233 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 14234 break 14235 } 14236 v.reset(OpAMD64MOVLloadidx4) 14237 v.AuxInt = off1 + off2 14238 v.Aux = mergeSym(sym1, sym2) 14239 v.AddArg(ptr) 14240 v.AddArg(idx) 14241 v.AddArg(mem) 14242 return true 14243 } 14244 // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 14245 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 14246 // result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 14247 for { 14248 off1 := v.AuxInt 14249 sym1 := v.Aux 14250 mem := v.Args[1] 14251 v_0 := v.Args[0] 14252 if v_0.Op != OpAMD64LEAQ8 { 14253 break 14254 } 14255 off2 := v_0.AuxInt 14256 sym2 := v_0.Aux 14257 idx := v_0.Args[1] 14258 ptr := v_0.Args[0] 14259 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 14260 break 14261 } 14262 v.reset(OpAMD64MOVLloadidx8) 14263 v.AuxInt = off1 + off2 14264 v.Aux = mergeSym(sym1, sym2) 14265 v.AddArg(ptr) 14266 v.AddArg(idx) 14267 v.AddArg(mem) 14268 return true 14269 } 14270 // match: (MOVLload [off] {sym} (ADDQ ptr idx) 
mem) 14271 // cond: ptr.Op != OpSB 14272 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 14273 for { 14274 off := v.AuxInt 14275 sym := v.Aux 14276 mem := v.Args[1] 14277 v_0 := v.Args[0] 14278 if v_0.Op != OpAMD64ADDQ { 14279 break 14280 } 14281 idx := v_0.Args[1] 14282 ptr := v_0.Args[0] 14283 if !(ptr.Op != OpSB) { 14284 break 14285 } 14286 v.reset(OpAMD64MOVLloadidx1) 14287 v.AuxInt = off 14288 v.Aux = sym 14289 v.AddArg(ptr) 14290 v.AddArg(idx) 14291 v.AddArg(mem) 14292 return true 14293 } 14294 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 14295 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 14296 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 14297 for { 14298 off1 := v.AuxInt 14299 sym1 := v.Aux 14300 mem := v.Args[1] 14301 v_0 := v.Args[0] 14302 if v_0.Op != OpAMD64LEAL { 14303 break 14304 } 14305 off2 := v_0.AuxInt 14306 sym2 := v_0.Aux 14307 base := v_0.Args[0] 14308 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 14309 break 14310 } 14311 v.reset(OpAMD64MOVLload) 14312 v.AuxInt = off1 + off2 14313 v.Aux = mergeSym(sym1, sym2) 14314 v.AddArg(base) 14315 v.AddArg(mem) 14316 return true 14317 } 14318 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 14319 // cond: is32Bit(off1+off2) 14320 // result: (MOVLload [off1+off2] {sym} ptr mem) 14321 for { 14322 off1 := v.AuxInt 14323 sym := v.Aux 14324 mem := v.Args[1] 14325 v_0 := v.Args[0] 14326 if v_0.Op != OpAMD64ADDLconst { 14327 break 14328 } 14329 off2 := v_0.AuxInt 14330 ptr := v_0.Args[0] 14331 if !(is32Bit(off1 + off2)) { 14332 break 14333 } 14334 v.reset(OpAMD64MOVLload) 14335 v.AuxInt = off1 + off2 14336 v.Aux = sym 14337 v.AddArg(ptr) 14338 v.AddArg(mem) 14339 return true 14340 } 14341 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) 14342 // result: (MOVLf2i val) 14343 for { 14344 off := v.AuxInt 14345 sym := v.Aux 14346 _ = v.Args[1] 14347 ptr := v.Args[0] 14348 v_1 := v.Args[1] 14349 if v_1.Op != OpAMD64MOVSSstore || v_1.AuxInt != off || v_1.Aux != sym { 14350 break 14351 } 14352 _ = v_1.Args[2] 14353 if ptr != v_1.Args[0] { 14354 break 14355 } 14356 val := v_1.Args[1] 14357 v.reset(OpAMD64MOVLf2i) 14358 v.AddArg(val) 14359 return true 14360 } 14361 return false 14362 } 14363 func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool { 14364 b := v.Block 14365 config := b.Func.Config 14366 // match: (MOVLload [off] {sym} (SB) _) 14367 // cond: symIsRO(sym) 14368 // result: (MOVQconst [int64(read32(sym, off, config.BigEndian))]) 14369 for { 14370 off := v.AuxInt 14371 sym := v.Aux 14372 _ = v.Args[1] 14373 v_0 := v.Args[0] 14374 if v_0.Op != OpSB || !(symIsRO(sym)) { 14375 break 14376 } 14377 v.reset(OpAMD64MOVQconst) 14378 v.AuxInt = int64(read32(sym, off, config.BigEndian)) 14379 return true 14380 } 14381 return false 14382 } 14383 func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { 14384 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 14385 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 14386 for { 14387 c := v.AuxInt 14388 sym := v.Aux 14389 mem := v.Args[2] 14390 ptr := v.Args[0] 14391 v_1 := v.Args[1] 14392 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { 14393 break 14394 } 14395 idx := v_1.Args[0] 14396 v.reset(OpAMD64MOVLloadidx4) 14397 v.AuxInt = c 14398 v.Aux = sym 14399 v.AddArg(ptr) 14400 v.AddArg(idx) 14401 v.AddArg(mem) 14402 return true 14403 } 14404 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) 14405 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 14406 for { 14407 c := v.AuxInt 
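// These MOVLloadidx1 rules recognize an index pre-shifted by SHLQconst [2]
// or [3] and switch to the scaled forms MOVLloadidx4/MOVLloadidx8, matching
// the hardware's base+index*scale addressing; because idx1 addressing treats
// its two address operands symmetrically, each pattern is matched with ptr
// and idx in either order.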
14408 sym := v.Aux 14409 mem := v.Args[2] 14410 v_0 := v.Args[0] 14411 if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 2 { 14412 break 14413 } 14414 idx := v_0.Args[0] 14415 ptr := v.Args[1] 14416 v.reset(OpAMD64MOVLloadidx4) 14417 v.AuxInt = c 14418 v.Aux = sym 14419 v.AddArg(ptr) 14420 v.AddArg(idx) 14421 v.AddArg(mem) 14422 return true 14423 } 14424 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 14425 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 14426 for { 14427 c := v.AuxInt 14428 sym := v.Aux 14429 mem := v.Args[2] 14430 ptr := v.Args[0] 14431 v_1 := v.Args[1] 14432 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { 14433 break 14434 } 14435 idx := v_1.Args[0] 14436 v.reset(OpAMD64MOVLloadidx8) 14437 v.AuxInt = c 14438 v.Aux = sym 14439 v.AddArg(ptr) 14440 v.AddArg(idx) 14441 v.AddArg(mem) 14442 return true 14443 } 14444 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 14445 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 14446 for { 14447 c := v.AuxInt 14448 sym := v.Aux 14449 mem := v.Args[2] 14450 v_0 := v.Args[0] 14451 if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 3 { 14452 break 14453 } 14454 idx := v_0.Args[0] 14455 ptr := v.Args[1] 14456 v.reset(OpAMD64MOVLloadidx8) 14457 v.AuxInt = c 14458 v.Aux = sym 14459 v.AddArg(ptr) 14460 v.AddArg(idx) 14461 v.AddArg(mem) 14462 return true 14463 } 14464 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 14465 // cond: is32Bit(c+d) 14466 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 14467 for { 14468 c := v.AuxInt 14469 sym := v.Aux 14470 mem := v.Args[2] 14471 v_0 := v.Args[0] 14472 if v_0.Op != OpAMD64ADDQconst { 14473 break 14474 } 14475 d := v_0.AuxInt 14476 ptr := v_0.Args[0] 14477 idx := v.Args[1] 14478 if !(is32Bit(c + d)) { 14479 break 14480 } 14481 v.reset(OpAMD64MOVLloadidx1) 14482 v.AuxInt = c + d 14483 v.Aux = sym 14484 v.AddArg(ptr) 14485 v.AddArg(idx) 14486 v.AddArg(mem) 14487 return true 14488 } 14489 // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 14490 // cond: is32Bit(c+d) 14491 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 14492 for { 14493 c := v.AuxInt 14494 sym := v.Aux 14495 mem := v.Args[2] 14496 idx := v.Args[0] 14497 v_1 := v.Args[1] 14498 if v_1.Op != OpAMD64ADDQconst { 14499 break 14500 } 14501 d := v_1.AuxInt 14502 ptr := v_1.Args[0] 14503 if !(is32Bit(c + d)) { 14504 break 14505 } 14506 v.reset(OpAMD64MOVLloadidx1) 14507 v.AuxInt = c + d 14508 v.Aux = sym 14509 v.AddArg(ptr) 14510 v.AddArg(idx) 14511 v.AddArg(mem) 14512 return true 14513 } 14514 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 14515 // cond: is32Bit(c+d) 14516 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 14517 for { 14518 c := v.AuxInt 14519 sym := v.Aux 14520 mem := v.Args[2] 14521 ptr := v.Args[0] 14522 v_1 := v.Args[1] 14523 if v_1.Op != OpAMD64ADDQconst { 14524 break 14525 } 14526 d := v_1.AuxInt 14527 idx := v_1.Args[0] 14528 if !(is32Bit(c + d)) { 14529 break 14530 } 14531 v.reset(OpAMD64MOVLloadidx1) 14532 v.AuxInt = c + d 14533 v.Aux = sym 14534 v.AddArg(ptr) 14535 v.AddArg(idx) 14536 v.AddArg(mem) 14537 return true 14538 } 14539 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 14540 // cond: is32Bit(c+d) 14541 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 14542 for { 14543 c := v.AuxInt 14544 sym := v.Aux 14545 mem := v.Args[2] 14546 v_0 := v.Args[0] 14547 if v_0.Op != OpAMD64ADDQconst { 14548 break 14549 } 14550 d := v_0.AuxInt 14551 idx := v_0.Args[0] 14552 ptr := v.Args[1] 14553 if !(is32Bit(c + d)) { 14554 break 14555 } 14556 
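// As with the other addressing rewrites, a constant add on either address
// operand folds into the displacement, guarded by is32Bit(c+d). For the
// scaled variants further down, a constant added to the index is scaled
// first (c+4*d for idx4), since the index contributes idx*scale to the
// address.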
v.reset(OpAMD64MOVLloadidx1) 14557 v.AuxInt = c + d 14558 v.Aux = sym 14559 v.AddArg(ptr) 14560 v.AddArg(idx) 14561 v.AddArg(mem) 14562 return true 14563 } 14564 // match: (MOVLloadidx1 [i] {s} p (MOVQconst [c]) mem) 14565 // cond: is32Bit(i+c) 14566 // result: (MOVLload [i+c] {s} p mem) 14567 for { 14568 i := v.AuxInt 14569 s := v.Aux 14570 mem := v.Args[2] 14571 p := v.Args[0] 14572 v_1 := v.Args[1] 14573 if v_1.Op != OpAMD64MOVQconst { 14574 break 14575 } 14576 c := v_1.AuxInt 14577 if !(is32Bit(i + c)) { 14578 break 14579 } 14580 v.reset(OpAMD64MOVLload) 14581 v.AuxInt = i + c 14582 v.Aux = s 14583 v.AddArg(p) 14584 v.AddArg(mem) 14585 return true 14586 } 14587 // match: (MOVLloadidx1 [i] {s} (MOVQconst [c]) p mem) 14588 // cond: is32Bit(i+c) 14589 // result: (MOVLload [i+c] {s} p mem) 14590 for { 14591 i := v.AuxInt 14592 s := v.Aux 14593 mem := v.Args[2] 14594 v_0 := v.Args[0] 14595 if v_0.Op != OpAMD64MOVQconst { 14596 break 14597 } 14598 c := v_0.AuxInt 14599 p := v.Args[1] 14600 if !(is32Bit(i + c)) { 14601 break 14602 } 14603 v.reset(OpAMD64MOVLload) 14604 v.AuxInt = i + c 14605 v.Aux = s 14606 v.AddArg(p) 14607 v.AddArg(mem) 14608 return true 14609 } 14610 return false 14611 } 14612 func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { 14613 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 14614 // cond: is32Bit(c+d) 14615 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 14616 for { 14617 c := v.AuxInt 14618 sym := v.Aux 14619 mem := v.Args[2] 14620 v_0 := v.Args[0] 14621 if v_0.Op != OpAMD64ADDQconst { 14622 break 14623 } 14624 d := v_0.AuxInt 14625 ptr := v_0.Args[0] 14626 idx := v.Args[1] 14627 if !(is32Bit(c + d)) { 14628 break 14629 } 14630 v.reset(OpAMD64MOVLloadidx4) 14631 v.AuxInt = c + d 14632 v.Aux = sym 14633 v.AddArg(ptr) 14634 v.AddArg(idx) 14635 v.AddArg(mem) 14636 return true 14637 } 14638 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 14639 // cond: is32Bit(c+4*d) 14640 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 14641 for { 14642 c := v.AuxInt 14643 sym := v.Aux 14644 mem := v.Args[2] 14645 ptr := v.Args[0] 14646 v_1 := v.Args[1] 14647 if v_1.Op != OpAMD64ADDQconst { 14648 break 14649 } 14650 d := v_1.AuxInt 14651 idx := v_1.Args[0] 14652 if !(is32Bit(c + 4*d)) { 14653 break 14654 } 14655 v.reset(OpAMD64MOVLloadidx4) 14656 v.AuxInt = c + 4*d 14657 v.Aux = sym 14658 v.AddArg(ptr) 14659 v.AddArg(idx) 14660 v.AddArg(mem) 14661 return true 14662 } 14663 // match: (MOVLloadidx4 [i] {s} p (MOVQconst [c]) mem) 14664 // cond: is32Bit(i+4*c) 14665 // result: (MOVLload [i+4*c] {s} p mem) 14666 for { 14667 i := v.AuxInt 14668 s := v.Aux 14669 mem := v.Args[2] 14670 p := v.Args[0] 14671 v_1 := v.Args[1] 14672 if v_1.Op != OpAMD64MOVQconst { 14673 break 14674 } 14675 c := v_1.AuxInt 14676 if !(is32Bit(i + 4*c)) { 14677 break 14678 } 14679 v.reset(OpAMD64MOVLload) 14680 v.AuxInt = i + 4*c 14681 v.Aux = s 14682 v.AddArg(p) 14683 v.AddArg(mem) 14684 return true 14685 } 14686 return false 14687 } 14688 func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { 14689 // match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 14690 // cond: is32Bit(c+d) 14691 // result: (MOVLloadidx8 [c+d] {sym} ptr idx mem) 14692 for { 14693 c := v.AuxInt 14694 sym := v.Aux 14695 mem := v.Args[2] 14696 v_0 := v.Args[0] 14697 if v_0.Op != OpAMD64ADDQconst { 14698 break 14699 } 14700 d := v_0.AuxInt 14701 ptr := v_0.Args[0] 14702 idx := v.Args[1] 14703 if !(is32Bit(c + d)) { 14704 break 14705 } 14706 v.reset(OpAMD64MOVLloadidx8) 14707 v.AuxInt = 
c + d 14708 v.Aux = sym 14709 v.AddArg(ptr) 14710 v.AddArg(idx) 14711 v.AddArg(mem) 14712 return true 14713 } 14714 // match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 14715 // cond: is32Bit(c+8*d) 14716 // result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem) 14717 for { 14718 c := v.AuxInt 14719 sym := v.Aux 14720 mem := v.Args[2] 14721 ptr := v.Args[0] 14722 v_1 := v.Args[1] 14723 if v_1.Op != OpAMD64ADDQconst { 14724 break 14725 } 14726 d := v_1.AuxInt 14727 idx := v_1.Args[0] 14728 if !(is32Bit(c + 8*d)) { 14729 break 14730 } 14731 v.reset(OpAMD64MOVLloadidx8) 14732 v.AuxInt = c + 8*d 14733 v.Aux = sym 14734 v.AddArg(ptr) 14735 v.AddArg(idx) 14736 v.AddArg(mem) 14737 return true 14738 } 14739 // match: (MOVLloadidx8 [i] {s} p (MOVQconst [c]) mem) 14740 // cond: is32Bit(i+8*c) 14741 // result: (MOVLload [i+8*c] {s} p mem) 14742 for { 14743 i := v.AuxInt 14744 s := v.Aux 14745 mem := v.Args[2] 14746 p := v.Args[0] 14747 v_1 := v.Args[1] 14748 if v_1.Op != OpAMD64MOVQconst { 14749 break 14750 } 14751 c := v_1.AuxInt 14752 if !(is32Bit(i + 8*c)) { 14753 break 14754 } 14755 v.reset(OpAMD64MOVLload) 14756 v.AuxInt = i + 8*c 14757 v.Aux = s 14758 v.AddArg(p) 14759 v.AddArg(mem) 14760 return true 14761 } 14762 return false 14763 } 14764 func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { 14765 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 14766 // result: (MOVLstore [off] {sym} ptr x mem) 14767 for { 14768 off := v.AuxInt 14769 sym := v.Aux 14770 mem := v.Args[2] 14771 ptr := v.Args[0] 14772 v_1 := v.Args[1] 14773 if v_1.Op != OpAMD64MOVLQSX { 14774 break 14775 } 14776 x := v_1.Args[0] 14777 v.reset(OpAMD64MOVLstore) 14778 v.AuxInt = off 14779 v.Aux = sym 14780 v.AddArg(ptr) 14781 v.AddArg(x) 14782 v.AddArg(mem) 14783 return true 14784 } 14785 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 14786 // result: (MOVLstore [off] {sym} ptr x mem) 14787 for { 14788 off := v.AuxInt 14789 sym := v.Aux 14790 mem := v.Args[2] 14791 ptr := v.Args[0] 14792 v_1 := v.Args[1] 14793 if v_1.Op != OpAMD64MOVLQZX { 14794 break 14795 } 14796 x := v_1.Args[0] 14797 v.reset(OpAMD64MOVLstore) 14798 v.AuxInt = off 14799 v.Aux = sym 14800 v.AddArg(ptr) 14801 v.AddArg(x) 14802 v.AddArg(mem) 14803 return true 14804 } 14805 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 14806 // cond: is32Bit(off1+off2) 14807 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 14808 for { 14809 off1 := v.AuxInt 14810 sym := v.Aux 14811 mem := v.Args[2] 14812 v_0 := v.Args[0] 14813 if v_0.Op != OpAMD64ADDQconst { 14814 break 14815 } 14816 off2 := v_0.AuxInt 14817 ptr := v_0.Args[0] 14818 val := v.Args[1] 14819 if !(is32Bit(off1 + off2)) { 14820 break 14821 } 14822 v.reset(OpAMD64MOVLstore) 14823 v.AuxInt = off1 + off2 14824 v.Aux = sym 14825 v.AddArg(ptr) 14826 v.AddArg(val) 14827 v.AddArg(mem) 14828 return true 14829 } 14830 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 14831 // cond: validOff(off) 14832 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 14833 for { 14834 off := v.AuxInt 14835 sym := v.Aux 14836 mem := v.Args[2] 14837 ptr := v.Args[0] 14838 v_1 := v.Args[1] 14839 if v_1.Op != OpAMD64MOVLconst { 14840 break 14841 } 14842 c := v_1.AuxInt 14843 if !(validOff(off)) { 14844 break 14845 } 14846 v.reset(OpAMD64MOVLstoreconst) 14847 v.AuxInt = makeValAndOff(int64(int32(c)), off) 14848 v.Aux = sym 14849 v.AddArg(ptr) 14850 v.AddArg(mem) 14851 return true 14852 } 14853 // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) 14854 // cond: 
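// The MOVLstore rules that follow fall into three broad groups: dropping a
// redundant sign or zero extension of the stored value (a 32-bit store only
// reads the low 32 bits, so (MOVLstore ptr (MOVLQSX x) mem) can store x
// directly), folding constant address arithmetic (ADDQconst, LEAQ, and the
// scaled LEAQ1/LEAQ4/LEAQ8 forms) into the store's displacement or into an
// indexed store, and turning a store of a constant into MOVLstoreconst so
// the value is carried as an immediate. As a hypothetical source-level
// example, an assignment like
//
//	*p = 7 // p *int32
//
// would normally reach the assembler as a single MOVL $7, (AX) via
// MOVLstoreconst rather than as a constant load followed by a store.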
func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		val := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
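// MOVLstore, continued: the first rules below recognize a pair of adjacent
// 32-bit stores that together write one 64-bit value and fuse them into a
// single MOVQstore, e.g.
//
//	(MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
//	=> (MOVQstore [i-4] {s} p w mem)
//
// The third rule applies the same widening to a load/store copy pair,
// turning two 4-byte copies into one 8-byte MOVQload/MOVQstore copy. The
// remaining rules fold LEAL/ADDLconst addresses and begin the
// read-modify-write fusions, where an (op)Lload feeding a store back to the
// same address becomes a single (op)Lmodify instruction.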
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVLstore || mem2.AuxInt != i-4 || mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload || x2.AuxInt != j-4 || x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = j - 4
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDLload || y.AuxInt != off || y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDLload || y.AuxInt != off || y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORLload || y.AuxInt != off || y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORLload || y.AuxInt != off || y.Aux != sym {
			break
		}
		_ = y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
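// MOVLstore, continued: read-modify-write fusion for two-operand ops. When
// the stored value is an ADDL/SUBL/ANDL/ORL/XORL/BTCL/BTRL/BTSL whose other
// operand is a single-use load from the very address being stored to, the
// load/op/store triple collapses into one memory-operand instruction,
// roughly
//
//	MOVL (AX), CX; ADDL BX, CX; MOVL CX, (AX)  =>  ADDL BX, (AX)
//
// For the commutative ops (ADDL, ANDL, ORL, XORL) both argument orders are
// matched, which is why those patterns each appear twice.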
func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
	// match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (SUBLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SUBL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		x := y.Args[0]
		l := y.Args[1]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTCLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTCL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTRLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTRL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
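// MOVLstore, continued: the same fusion for constant operands. An
// (op)Lconst applied to a load from the stored-to address becomes the
// corresponding (op)Lconstmodify instruction, with the constant and the
// store offset packed into one ValAndOff auxiliary value; the
// validValAndOff(c, off) guard checks that both halves fit their fields.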
func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
	// match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
	// result: (BTSLmodify [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64BTSL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
			break
		}
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ORLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64XORLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTCLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTRLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTRLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
	// result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64BTSLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
			break
		}
		v.reset(OpAMD64BTSLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// result: (MOVSSstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
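// MOVLstoreconst carries its 32-bit constant and its offset packed together
// in a ValAndOff. The rules below fold address arithmetic into that offset
// (the canAdd guards protect against overflow), switch to the indexed forms
// for LEAQ1/LEAQ4/ADDQ addresses, and merge two adjacent 4-byte constant
// stores into a single MOVQstore of the combined 64-bit immediate
// ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32, matching the two
// stores in either order.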
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		a := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		c := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
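// The indexed constant stores repeat the same ideas: a (SHLQconst [2] idx)
// index selects the 4-scaled form, ADDQconst on either the pointer or the
// index folds into the packed offset (scaled by 4 for the idx4 variant),
// and adjacent 4-byte pairs combine into a MOVQstoreidx1.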
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || i != x.Args[1] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(4*c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		if !(ValAndOff(x).canAdd(4 * c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(4 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx4 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || i != x.Args[1] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 2
		v0.AddArg(i)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
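// MOVLstoreidx1 mirrors the MOVLloadidx1 rules earlier in this section, but
// for stores: a shifted index ((SHLQconst [2] idx) or (SHLQconst [3] idx))
// selects the 4- or 8-scaled addressing mode, ADDQconst on either operand
// folds into the displacement while it still fits in 32 bits, adjacent
// upper/lower 32-bit stores of one 64-bit value fuse into MOVQstoreidx1,
// and a constant index turns the indexed store back into a plain MOVLstore.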
func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// result: (MOVLstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 || x.AuxInt != i-4 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 || x.AuxInt != i-4 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVLstore [i+c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		mem := v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
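// The scaled store variants account for the element size: an ADDQconst [d]
// on the index contributes 4*d (or 8*d) to the displacement. Note that the
// pair-merge rules here produce the 1-scaled MOVQstoreidx1 and therefore
// reintroduce an explicit (SHLQconst [2] idx) to preserve the original
// 4-scaled address.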
func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
	b := v.Block
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 || x.AuxInt != i-4 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 || x.AuxInt != i-4 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+4*c)
	// result: (MOVLstore [i+4*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		mem := v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		if !(is32Bit(i + 4*c)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i + 4*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool {
	// match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVLstore [i+8*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		mem := v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		if !(is32Bit(i + 8*c)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i + 8*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
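// MOVOload and MOVOstore move 16 bytes at a time through an XMM register;
// only the two plain address foldings (ADDQconst into the offset, LEAQ with
// symbol merging) apply to them, and MOVQatomicload gets the same two
// foldings. MOVQf2i reinterprets a float64's bits as an int64; the rule
// below handles the case where its operand is a function argument by
// re-reading the argument slot with the integer type in the entry block
// instead of moving the value between register files.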
!(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 16757 break 16758 } 16759 v.reset(OpAMD64MOVOload) 16760 v.AuxInt = off1 + off2 16761 v.Aux = mergeSym(sym1, sym2) 16762 v.AddArg(base) 16763 v.AddArg(mem) 16764 return true 16765 } 16766 return false 16767 } 16768 func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { 16769 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 16770 // cond: is32Bit(off1+off2) 16771 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 16772 for { 16773 off1 := v.AuxInt 16774 sym := v.Aux 16775 mem := v.Args[2] 16776 v_0 := v.Args[0] 16777 if v_0.Op != OpAMD64ADDQconst { 16778 break 16779 } 16780 off2 := v_0.AuxInt 16781 ptr := v_0.Args[0] 16782 val := v.Args[1] 16783 if !(is32Bit(off1 + off2)) { 16784 break 16785 } 16786 v.reset(OpAMD64MOVOstore) 16787 v.AuxInt = off1 + off2 16788 v.Aux = sym 16789 v.AddArg(ptr) 16790 v.AddArg(val) 16791 v.AddArg(mem) 16792 return true 16793 } 16794 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 16795 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 16796 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 16797 for { 16798 off1 := v.AuxInt 16799 sym1 := v.Aux 16800 mem := v.Args[2] 16801 v_0 := v.Args[0] 16802 if v_0.Op != OpAMD64LEAQ { 16803 break 16804 } 16805 off2 := v_0.AuxInt 16806 sym2 := v_0.Aux 16807 base := v_0.Args[0] 16808 val := v.Args[1] 16809 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 16810 break 16811 } 16812 v.reset(OpAMD64MOVOstore) 16813 v.AuxInt = off1 + off2 16814 v.Aux = mergeSym(sym1, sym2) 16815 v.AddArg(base) 16816 v.AddArg(val) 16817 v.AddArg(mem) 16818 return true 16819 } 16820 return false 16821 } 16822 func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { 16823 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 16824 // cond: is32Bit(off1+off2) 16825 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 16826 for { 16827 off1 := v.AuxInt 16828 sym := v.Aux 16829 mem := v.Args[1] 16830 v_0 := v.Args[0] 16831 if v_0.Op != OpAMD64ADDQconst { 16832 break 16833 } 16834 off2 := v_0.AuxInt 16835 ptr := v_0.Args[0] 16836 if !(is32Bit(off1 + off2)) { 16837 break 16838 } 16839 v.reset(OpAMD64MOVQatomicload) 16840 v.AuxInt = off1 + off2 16841 v.Aux = sym 16842 v.AddArg(ptr) 16843 v.AddArg(mem) 16844 return true 16845 } 16846 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 16847 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 16848 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 16849 for { 16850 off1 := v.AuxInt 16851 sym1 := v.Aux 16852 mem := v.Args[1] 16853 v_0 := v.Args[0] 16854 if v_0.Op != OpAMD64LEAQ { 16855 break 16856 } 16857 off2 := v_0.AuxInt 16858 sym2 := v_0.Aux 16859 ptr := v_0.Args[0] 16860 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 16861 break 16862 } 16863 v.reset(OpAMD64MOVQatomicload) 16864 v.AuxInt = off1 + off2 16865 v.Aux = mergeSym(sym1, sym2) 16866 v.AddArg(ptr) 16867 v.AddArg(mem) 16868 return true 16869 } 16870 return false 16871 } 16872 func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool { 16873 b := v.Block 16874 // match: (MOVQf2i <t> (Arg <u> [off] {sym})) 16875 // cond: t.Size() == u.Size() 16876 // result: @b.Func.Entry (Arg <t> [off] {sym}) 16877 for { 16878 t := v.Type 16879 v_0 := v.Args[0] 16880 if v_0.Op != OpArg { 16881 break 16882 } 16883 u := v_0.Type 16884 off := v_0.AuxInt 16885 sym := v_0.Aux 16886 if !(t.Size() == u.Size()) { 16887 break 16888 } 16889 b = b.Func.Entry 16890 v0 := b.NewValue0(v.Pos, 
OpArg, t) 16891 v.reset(OpCopy) 16892 v.AddArg(v0) 16893 v0.AuxInt = off 16894 v0.Aux = sym 16895 return true 16896 } 16897 return false 16898 } 16899 func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { 16900 b := v.Block 16901 // match: (MOVQi2f <t> (Arg <u> [off] {sym})) 16902 // cond: t.Size() == u.Size() 16903 // result: @b.Func.Entry (Arg <t> [off] {sym}) 16904 for { 16905 t := v.Type 16906 v_0 := v.Args[0] 16907 if v_0.Op != OpArg { 16908 break 16909 } 16910 u := v_0.Type 16911 off := v_0.AuxInt 16912 sym := v_0.Aux 16913 if !(t.Size() == u.Size()) { 16914 break 16915 } 16916 b = b.Func.Entry 16917 v0 := b.NewValue0(v.Pos, OpArg, t) 16918 v.reset(OpCopy) 16919 v.AddArg(v0) 16920 v0.AuxInt = off 16921 v0.Aux = sym 16922 return true 16923 } 16924 return false 16925 } 16926 func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { 16927 b := v.Block 16928 config := b.Func.Config 16929 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 16930 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 16931 // result: x 16932 for { 16933 off := v.AuxInt 16934 sym := v.Aux 16935 _ = v.Args[1] 16936 ptr := v.Args[0] 16937 v_1 := v.Args[1] 16938 if v_1.Op != OpAMD64MOVQstore { 16939 break 16940 } 16941 off2 := v_1.AuxInt 16942 sym2 := v_1.Aux 16943 _ = v_1.Args[2] 16944 ptr2 := v_1.Args[0] 16945 x := v_1.Args[1] 16946 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 16947 break 16948 } 16949 v.reset(OpCopy) 16950 v.Type = x.Type 16951 v.AddArg(x) 16952 return true 16953 } 16954 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 16955 // cond: is32Bit(off1+off2) 16956 // result: (MOVQload [off1+off2] {sym} ptr mem) 16957 for { 16958 off1 := v.AuxInt 16959 sym := v.Aux 16960 mem := v.Args[1] 16961 v_0 := v.Args[0] 16962 if v_0.Op != OpAMD64ADDQconst { 16963 break 16964 } 16965 off2 := v_0.AuxInt 16966 ptr := v_0.Args[0] 16967 if !(is32Bit(off1 + off2)) { 16968 break 16969 } 16970 v.reset(OpAMD64MOVQload) 16971 v.AuxInt = off1 + off2 16972 v.Aux = sym 16973 v.AddArg(ptr) 16974 v.AddArg(mem) 16975 return true 16976 } 16977 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 16978 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 16979 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 16980 for { 16981 off1 := v.AuxInt 16982 sym1 := v.Aux 16983 mem := v.Args[1] 16984 v_0 := v.Args[0] 16985 if v_0.Op != OpAMD64LEAQ { 16986 break 16987 } 16988 off2 := v_0.AuxInt 16989 sym2 := v_0.Aux 16990 base := v_0.Args[0] 16991 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 16992 break 16993 } 16994 v.reset(OpAMD64MOVQload) 16995 v.AuxInt = off1 + off2 16996 v.Aux = mergeSym(sym1, sym2) 16997 v.AddArg(base) 16998 v.AddArg(mem) 16999 return true 17000 } 17001 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 17002 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 17003 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 17004 for { 17005 off1 := v.AuxInt 17006 sym1 := v.Aux 17007 mem := v.Args[1] 17008 v_0 := v.Args[0] 17009 if v_0.Op != OpAMD64LEAQ1 { 17010 break 17011 } 17012 off2 := v_0.AuxInt 17013 sym2 := v_0.Aux 17014 idx := v_0.Args[1] 17015 ptr := v_0.Args[0] 17016 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 17017 break 17018 } 17019 v.reset(OpAMD64MOVQloadidx1) 17020 v.AuxInt = off1 + off2 17021 v.Aux = mergeSym(sym1, sym2) 17022 v.AddArg(ptr) 17023 v.AddArg(idx) 17024 v.AddArg(mem) 17025 return true 17026 } 17027 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] 
{sym2} ptr idx) mem) 17028 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 17029 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 17030 for { 17031 off1 := v.AuxInt 17032 sym1 := v.Aux 17033 mem := v.Args[1] 17034 v_0 := v.Args[0] 17035 if v_0.Op != OpAMD64LEAQ8 { 17036 break 17037 } 17038 off2 := v_0.AuxInt 17039 sym2 := v_0.Aux 17040 idx := v_0.Args[1] 17041 ptr := v_0.Args[0] 17042 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 17043 break 17044 } 17045 v.reset(OpAMD64MOVQloadidx8) 17046 v.AuxInt = off1 + off2 17047 v.Aux = mergeSym(sym1, sym2) 17048 v.AddArg(ptr) 17049 v.AddArg(idx) 17050 v.AddArg(mem) 17051 return true 17052 } 17053 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 17054 // cond: ptr.Op != OpSB 17055 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 17056 for { 17057 off := v.AuxInt 17058 sym := v.Aux 17059 mem := v.Args[1] 17060 v_0 := v.Args[0] 17061 if v_0.Op != OpAMD64ADDQ { 17062 break 17063 } 17064 idx := v_0.Args[1] 17065 ptr := v_0.Args[0] 17066 if !(ptr.Op != OpSB) { 17067 break 17068 } 17069 v.reset(OpAMD64MOVQloadidx1) 17070 v.AuxInt = off 17071 v.Aux = sym 17072 v.AddArg(ptr) 17073 v.AddArg(idx) 17074 v.AddArg(mem) 17075 return true 17076 } 17077 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 17078 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 17079 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 17080 for { 17081 off1 := v.AuxInt 17082 sym1 := v.Aux 17083 mem := v.Args[1] 17084 v_0 := v.Args[0] 17085 if v_0.Op != OpAMD64LEAL { 17086 break 17087 } 17088 off2 := v_0.AuxInt 17089 sym2 := v_0.Aux 17090 base := v_0.Args[0] 17091 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 17092 break 17093 } 17094 v.reset(OpAMD64MOVQload) 17095 v.AuxInt = off1 + off2 17096 v.Aux = mergeSym(sym1, sym2) 17097 v.AddArg(base) 17098 v.AddArg(mem) 17099 return true 17100 } 17101 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 17102 // cond: is32Bit(off1+off2) 17103 // result: (MOVQload [off1+off2] {sym} ptr mem) 17104 for { 17105 off1 := v.AuxInt 17106 sym := v.Aux 17107 mem := v.Args[1] 17108 v_0 := v.Args[0] 17109 if v_0.Op != OpAMD64ADDLconst { 17110 break 17111 } 17112 off2 := v_0.AuxInt 17113 ptr := v_0.Args[0] 17114 if !(is32Bit(off1 + off2)) { 17115 break 17116 } 17117 v.reset(OpAMD64MOVQload) 17118 v.AuxInt = off1 + off2 17119 v.Aux = sym 17120 v.AddArg(ptr) 17121 v.AddArg(mem) 17122 return true 17123 } 17124 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) 17125 // result: (MOVQf2i val) 17126 for { 17127 off := v.AuxInt 17128 sym := v.Aux 17129 _ = v.Args[1] 17130 ptr := v.Args[0] 17131 v_1 := v.Args[1] 17132 if v_1.Op != OpAMD64MOVSDstore || v_1.AuxInt != off || v_1.Aux != sym { 17133 break 17134 } 17135 _ = v_1.Args[2] 17136 if ptr != v_1.Args[0] { 17137 break 17138 } 17139 val := v_1.Args[1] 17140 v.reset(OpAMD64MOVQf2i) 17141 v.AddArg(val) 17142 return true 17143 } 17144 // match: (MOVQload [off] {sym} (SB) _) 17145 // cond: symIsRO(sym) 17146 // result: (MOVQconst [int64(read64(sym, off, config.BigEndian))]) 17147 for { 17148 off := v.AuxInt 17149 sym := v.Aux 17150 _ = v.Args[1] 17151 v_0 := v.Args[0] 17152 if v_0.Op != OpSB || !(symIsRO(sym)) { 17153 break 17154 } 17155 v.reset(OpAMD64MOVQconst) 17156 v.AuxInt = int64(read64(sym, off, config.BigEndian)) 17157 return true 17158 } 17159 return false 17160 } 17161 func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { 17162 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 
17163 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 17164 for { 17165 c := v.AuxInt 17166 sym := v.Aux 17167 mem := v.Args[2] 17168 ptr := v.Args[0] 17169 v_1 := v.Args[1] 17170 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { 17171 break 17172 } 17173 idx := v_1.Args[0] 17174 v.reset(OpAMD64MOVQloadidx8) 17175 v.AuxInt = c 17176 v.Aux = sym 17177 v.AddArg(ptr) 17178 v.AddArg(idx) 17179 v.AddArg(mem) 17180 return true 17181 } 17182 // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 17183 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 17184 for { 17185 c := v.AuxInt 17186 sym := v.Aux 17187 mem := v.Args[2] 17188 v_0 := v.Args[0] 17189 if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 3 { 17190 break 17191 } 17192 idx := v_0.Args[0] 17193 ptr := v.Args[1] 17194 v.reset(OpAMD64MOVQloadidx8) 17195 v.AuxInt = c 17196 v.Aux = sym 17197 v.AddArg(ptr) 17198 v.AddArg(idx) 17199 v.AddArg(mem) 17200 return true 17201 } 17202 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 17203 // cond: is32Bit(c+d) 17204 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 17205 for { 17206 c := v.AuxInt 17207 sym := v.Aux 17208 mem := v.Args[2] 17209 v_0 := v.Args[0] 17210 if v_0.Op != OpAMD64ADDQconst { 17211 break 17212 } 17213 d := v_0.AuxInt 17214 ptr := v_0.Args[0] 17215 idx := v.Args[1] 17216 if !(is32Bit(c + d)) { 17217 break 17218 } 17219 v.reset(OpAMD64MOVQloadidx1) 17220 v.AuxInt = c + d 17221 v.Aux = sym 17222 v.AddArg(ptr) 17223 v.AddArg(idx) 17224 v.AddArg(mem) 17225 return true 17226 } 17227 // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 17228 // cond: is32Bit(c+d) 17229 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 17230 for { 17231 c := v.AuxInt 17232 sym := v.Aux 17233 mem := v.Args[2] 17234 idx := v.Args[0] 17235 v_1 := v.Args[1] 17236 if v_1.Op != OpAMD64ADDQconst { 17237 break 17238 } 17239 d := v_1.AuxInt 17240 ptr := v_1.Args[0] 17241 if !(is32Bit(c + d)) { 17242 break 17243 } 17244 v.reset(OpAMD64MOVQloadidx1) 17245 v.AuxInt = c + d 17246 v.Aux = sym 17247 v.AddArg(ptr) 17248 v.AddArg(idx) 17249 v.AddArg(mem) 17250 return true 17251 } 17252 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 17253 // cond: is32Bit(c+d) 17254 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 17255 for { 17256 c := v.AuxInt 17257 sym := v.Aux 17258 mem := v.Args[2] 17259 ptr := v.Args[0] 17260 v_1 := v.Args[1] 17261 if v_1.Op != OpAMD64ADDQconst { 17262 break 17263 } 17264 d := v_1.AuxInt 17265 idx := v_1.Args[0] 17266 if !(is32Bit(c + d)) { 17267 break 17268 } 17269 v.reset(OpAMD64MOVQloadidx1) 17270 v.AuxInt = c + d 17271 v.Aux = sym 17272 v.AddArg(ptr) 17273 v.AddArg(idx) 17274 v.AddArg(mem) 17275 return true 17276 } 17277 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 17278 // cond: is32Bit(c+d) 17279 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 17280 for { 17281 c := v.AuxInt 17282 sym := v.Aux 17283 mem := v.Args[2] 17284 v_0 := v.Args[0] 17285 if v_0.Op != OpAMD64ADDQconst { 17286 break 17287 } 17288 d := v_0.AuxInt 17289 idx := v_0.Args[0] 17290 ptr := v.Args[1] 17291 if !(is32Bit(c + d)) { 17292 break 17293 } 17294 v.reset(OpAMD64MOVQloadidx1) 17295 v.AuxInt = c + d 17296 v.Aux = sym 17297 v.AddArg(ptr) 17298 v.AddArg(idx) 17299 v.AddArg(mem) 17300 return true 17301 } 17302 // match: (MOVQloadidx1 [i] {s} p (MOVQconst [c]) mem) 17303 // cond: is32Bit(i+c) 17304 // result: (MOVQload [i+c] {s} p mem) 17305 for { 17306 i := v.AuxInt 17307 s := v.Aux 17308 mem := v.Args[2] 17309 p := v.Args[0] 17310 v_1 := v.Args[1] 17311 if 
v_1.Op != OpAMD64MOVQconst { 17312 break 17313 } 17314 c := v_1.AuxInt 17315 if !(is32Bit(i + c)) { 17316 break 17317 } 17318 v.reset(OpAMD64MOVQload) 17319 v.AuxInt = i + c 17320 v.Aux = s 17321 v.AddArg(p) 17322 v.AddArg(mem) 17323 return true 17324 } 17325 // match: (MOVQloadidx1 [i] {s} (MOVQconst [c]) p mem) 17326 // cond: is32Bit(i+c) 17327 // result: (MOVQload [i+c] {s} p mem) 17328 for { 17329 i := v.AuxInt 17330 s := v.Aux 17331 mem := v.Args[2] 17332 v_0 := v.Args[0] 17333 if v_0.Op != OpAMD64MOVQconst { 17334 break 17335 } 17336 c := v_0.AuxInt 17337 p := v.Args[1] 17338 if !(is32Bit(i + c)) { 17339 break 17340 } 17341 v.reset(OpAMD64MOVQload) 17342 v.AuxInt = i + c 17343 v.Aux = s 17344 v.AddArg(p) 17345 v.AddArg(mem) 17346 return true 17347 } 17348 return false 17349 } 17350 func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { 17351 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 17352 // cond: is32Bit(c+d) 17353 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 17354 for { 17355 c := v.AuxInt 17356 sym := v.Aux 17357 mem := v.Args[2] 17358 v_0 := v.Args[0] 17359 if v_0.Op != OpAMD64ADDQconst { 17360 break 17361 } 17362 d := v_0.AuxInt 17363 ptr := v_0.Args[0] 17364 idx := v.Args[1] 17365 if !(is32Bit(c + d)) { 17366 break 17367 } 17368 v.reset(OpAMD64MOVQloadidx8) 17369 v.AuxInt = c + d 17370 v.Aux = sym 17371 v.AddArg(ptr) 17372 v.AddArg(idx) 17373 v.AddArg(mem) 17374 return true 17375 } 17376 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 17377 // cond: is32Bit(c+8*d) 17378 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 17379 for { 17380 c := v.AuxInt 17381 sym := v.Aux 17382 mem := v.Args[2] 17383 ptr := v.Args[0] 17384 v_1 := v.Args[1] 17385 if v_1.Op != OpAMD64ADDQconst { 17386 break 17387 } 17388 d := v_1.AuxInt 17389 idx := v_1.Args[0] 17390 if !(is32Bit(c + 8*d)) { 17391 break 17392 } 17393 v.reset(OpAMD64MOVQloadidx8) 17394 v.AuxInt = c + 8*d 17395 v.Aux = sym 17396 v.AddArg(ptr) 17397 v.AddArg(idx) 17398 v.AddArg(mem) 17399 return true 17400 } 17401 // match: (MOVQloadidx8 [i] {s} p (MOVQconst [c]) mem) 17402 // cond: is32Bit(i+8*c) 17403 // result: (MOVQload [i+8*c] {s} p mem) 17404 for { 17405 i := v.AuxInt 17406 s := v.Aux 17407 mem := v.Args[2] 17408 p := v.Args[0] 17409 v_1 := v.Args[1] 17410 if v_1.Op != OpAMD64MOVQconst { 17411 break 17412 } 17413 c := v_1.AuxInt 17414 if !(is32Bit(i + 8*c)) { 17415 break 17416 } 17417 v.reset(OpAMD64MOVQload) 17418 v.AuxInt = i + 8*c 17419 v.Aux = s 17420 v.AddArg(p) 17421 v.AddArg(mem) 17422 return true 17423 } 17424 return false 17425 } 17426 func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { 17427 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 17428 // cond: is32Bit(off1+off2) 17429 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 17430 for { 17431 off1 := v.AuxInt 17432 sym := v.Aux 17433 mem := v.Args[2] 17434 v_0 := v.Args[0] 17435 if v_0.Op != OpAMD64ADDQconst { 17436 break 17437 } 17438 off2 := v_0.AuxInt 17439 ptr := v_0.Args[0] 17440 val := v.Args[1] 17441 if !(is32Bit(off1 + off2)) { 17442 break 17443 } 17444 v.reset(OpAMD64MOVQstore) 17445 v.AuxInt = off1 + off2 17446 v.Aux = sym 17447 v.AddArg(ptr) 17448 v.AddArg(val) 17449 v.AddArg(mem) 17450 return true 17451 } 17452 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 17453 // cond: validValAndOff(c,off) 17454 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 17455 for { 17456 off := v.AuxInt 17457 sym := v.Aux 17458 mem := v.Args[2] 17459 ptr := v.Args[0] 17460 
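// Note (editor): this rule turns a store of an immediate (MOVQstore of a
// MOVQconst) into a single store-immediate, MOVQstoreconst. AuxInt cannot
// carry the constant and the offset separately, so both are packed into one
// ValAndOff word; validValAndOff reports whether each half fits in 32 bits.
// A minimal sketch of the packing, assuming the 32/32-bit split this package
// uses (makeValAndOffSketch is illustrative, not the real helper):
//
//	func makeValAndOffSketch(val, off int64) int64 {
//		return val<<32 | off&0xffffffff // value in the high half, offset in the low half
//	}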
v_1 := v.Args[1] 17461 if v_1.Op != OpAMD64MOVQconst { 17462 break 17463 } 17464 c := v_1.AuxInt 17465 if !(validValAndOff(c, off)) { 17466 break 17467 } 17468 v.reset(OpAMD64MOVQstoreconst) 17469 v.AuxInt = makeValAndOff(c, off) 17470 v.Aux = sym 17471 v.AddArg(ptr) 17472 v.AddArg(mem) 17473 return true 17474 } 17475 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 17476 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 17477 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 17478 for { 17479 off1 := v.AuxInt 17480 sym1 := v.Aux 17481 mem := v.Args[2] 17482 v_0 := v.Args[0] 17483 if v_0.Op != OpAMD64LEAQ { 17484 break 17485 } 17486 off2 := v_0.AuxInt 17487 sym2 := v_0.Aux 17488 base := v_0.Args[0] 17489 val := v.Args[1] 17490 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 17491 break 17492 } 17493 v.reset(OpAMD64MOVQstore) 17494 v.AuxInt = off1 + off2 17495 v.Aux = mergeSym(sym1, sym2) 17496 v.AddArg(base) 17497 v.AddArg(val) 17498 v.AddArg(mem) 17499 return true 17500 } 17501 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 17502 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 17503 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 17504 for { 17505 off1 := v.AuxInt 17506 sym1 := v.Aux 17507 mem := v.Args[2] 17508 v_0 := v.Args[0] 17509 if v_0.Op != OpAMD64LEAQ1 { 17510 break 17511 } 17512 off2 := v_0.AuxInt 17513 sym2 := v_0.Aux 17514 idx := v_0.Args[1] 17515 ptr := v_0.Args[0] 17516 val := v.Args[1] 17517 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 17518 break 17519 } 17520 v.reset(OpAMD64MOVQstoreidx1) 17521 v.AuxInt = off1 + off2 17522 v.Aux = mergeSym(sym1, sym2) 17523 v.AddArg(ptr) 17524 v.AddArg(idx) 17525 v.AddArg(val) 17526 v.AddArg(mem) 17527 return true 17528 } 17529 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 17530 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 17531 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 17532 for { 17533 off1 := v.AuxInt 17534 sym1 := v.Aux 17535 mem := v.Args[2] 17536 v_0 := v.Args[0] 17537 if v_0.Op != OpAMD64LEAQ8 { 17538 break 17539 } 17540 off2 := v_0.AuxInt 17541 sym2 := v_0.Aux 17542 idx := v_0.Args[1] 17543 ptr := v_0.Args[0] 17544 val := v.Args[1] 17545 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 17546 break 17547 } 17548 v.reset(OpAMD64MOVQstoreidx8) 17549 v.AuxInt = off1 + off2 17550 v.Aux = mergeSym(sym1, sym2) 17551 v.AddArg(ptr) 17552 v.AddArg(idx) 17553 v.AddArg(val) 17554 v.AddArg(mem) 17555 return true 17556 } 17557 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 17558 // cond: ptr.Op != OpSB 17559 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 17560 for { 17561 off := v.AuxInt 17562 sym := v.Aux 17563 mem := v.Args[2] 17564 v_0 := v.Args[0] 17565 if v_0.Op != OpAMD64ADDQ { 17566 break 17567 } 17568 idx := v_0.Args[1] 17569 ptr := v_0.Args[0] 17570 val := v.Args[1] 17571 if !(ptr.Op != OpSB) { 17572 break 17573 } 17574 v.reset(OpAMD64MOVQstoreidx1) 17575 v.AuxInt = off 17576 v.Aux = sym 17577 v.AddArg(ptr) 17578 v.AddArg(idx) 17579 v.AddArg(val) 17580 v.AddArg(mem) 17581 return true 17582 } 17583 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 17584 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 17585 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 17586 for { 17587 off1 := v.AuxInt 17588 sym1 := v.Aux 17589 mem := v.Args[2] 17590 v_0 := v.Args[0] 17591 if v_0.Op != 
OpAMD64LEAL { 17592 break 17593 } 17594 off2 := v_0.AuxInt 17595 sym2 := v_0.Aux 17596 base := v_0.Args[0] 17597 val := v.Args[1] 17598 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 17599 break 17600 } 17601 v.reset(OpAMD64MOVQstore) 17602 v.AuxInt = off1 + off2 17603 v.Aux = mergeSym(sym1, sym2) 17604 v.AddArg(base) 17605 v.AddArg(val) 17606 v.AddArg(mem) 17607 return true 17608 } 17609 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 17610 // cond: is32Bit(off1+off2) 17611 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 17612 for { 17613 off1 := v.AuxInt 17614 sym := v.Aux 17615 mem := v.Args[2] 17616 v_0 := v.Args[0] 17617 if v_0.Op != OpAMD64ADDLconst { 17618 break 17619 } 17620 off2 := v_0.AuxInt 17621 ptr := v_0.Args[0] 17622 val := v.Args[1] 17623 if !(is32Bit(off1 + off2)) { 17624 break 17625 } 17626 v.reset(OpAMD64MOVQstore) 17627 v.AuxInt = off1 + off2 17628 v.Aux = sym 17629 v.AddArg(ptr) 17630 v.AddArg(val) 17631 v.AddArg(mem) 17632 return true 17633 } 17634 // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem) 17635 // cond: y.Uses==1 && clobber(y) 17636 // result: (ADDQmodify [off] {sym} ptr x mem) 17637 for { 17638 off := v.AuxInt 17639 sym := v.Aux 17640 mem := v.Args[2] 17641 ptr := v.Args[0] 17642 y := v.Args[1] 17643 if y.Op != OpAMD64ADDQload || y.AuxInt != off || y.Aux != sym { 17644 break 17645 } 17646 _ = y.Args[2] 17647 x := y.Args[0] 17648 if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { 17649 break 17650 } 17651 v.reset(OpAMD64ADDQmodify) 17652 v.AuxInt = off 17653 v.Aux = sym 17654 v.AddArg(ptr) 17655 v.AddArg(x) 17656 v.AddArg(mem) 17657 return true 17658 } 17659 // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) 17660 // cond: y.Uses==1 && clobber(y) 17661 // result: (ANDQmodify [off] {sym} ptr x mem) 17662 for { 17663 off := v.AuxInt 17664 sym := v.Aux 17665 mem := v.Args[2] 17666 ptr := v.Args[0] 17667 y := v.Args[1] 17668 if y.Op != OpAMD64ANDQload || y.AuxInt != off || y.Aux != sym { 17669 break 17670 } 17671 _ = y.Args[2] 17672 x := y.Args[0] 17673 if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { 17674 break 17675 } 17676 v.reset(OpAMD64ANDQmodify) 17677 v.AuxInt = off 17678 v.Aux = sym 17679 v.AddArg(ptr) 17680 v.AddArg(x) 17681 v.AddArg(mem) 17682 return true 17683 } 17684 return false 17685 } 17686 func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { 17687 // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) 17688 // cond: y.Uses==1 && clobber(y) 17689 // result: (ORQmodify [off] {sym} ptr x mem) 17690 for { 17691 off := v.AuxInt 17692 sym := v.Aux 17693 mem := v.Args[2] 17694 ptr := v.Args[0] 17695 y := v.Args[1] 17696 if y.Op != OpAMD64ORQload || y.AuxInt != off || y.Aux != sym { 17697 break 17698 } 17699 _ = y.Args[2] 17700 x := y.Args[0] 17701 if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { 17702 break 17703 } 17704 v.reset(OpAMD64ORQmodify) 17705 v.AuxInt = off 17706 v.Aux = sym 17707 v.AddArg(ptr) 17708 v.AddArg(x) 17709 v.AddArg(mem) 17710 return true 17711 } 17712 // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem) 17713 // cond: y.Uses==1 && clobber(y) 17714 // result: (XORQmodify [off] {sym} ptr x mem) 17715 for { 17716 off := v.AuxInt 17717 sym := v.Aux 17718 mem := v.Args[2] 17719 ptr := v.Args[0] 17720 y := v.Args[1] 17721 if y.Op != OpAMD64XORQload || y.AuxInt != off || y.Aux != sym { 17722 break 17723 } 17724 _ = y.Args[2] 17725 x 
:= y.Args[0] 17726 if ptr != y.Args[1] || mem != y.Args[2] || !(y.Uses == 1 && clobber(y)) { 17727 break 17728 } 17729 v.reset(OpAMD64XORQmodify) 17730 v.AuxInt = off 17731 v.Aux = sym 17732 v.AddArg(ptr) 17733 v.AddArg(x) 17734 v.AddArg(mem) 17735 return true 17736 } 17737 // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) 17738 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17739 // result: (ADDQmodify [off] {sym} ptr x mem) 17740 for { 17741 off := v.AuxInt 17742 sym := v.Aux 17743 mem := v.Args[2] 17744 ptr := v.Args[0] 17745 y := v.Args[1] 17746 if y.Op != OpAMD64ADDQ { 17747 break 17748 } 17749 x := y.Args[1] 17750 l := y.Args[0] 17751 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17752 break 17753 } 17754 _ = l.Args[1] 17755 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17756 break 17757 } 17758 v.reset(OpAMD64ADDQmodify) 17759 v.AuxInt = off 17760 v.Aux = sym 17761 v.AddArg(ptr) 17762 v.AddArg(x) 17763 v.AddArg(mem) 17764 return true 17765 } 17766 // match: (MOVQstore {sym} [off] ptr y:(ADDQ x l:(MOVQload [off] {sym} ptr mem)) mem) 17767 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17768 // result: (ADDQmodify [off] {sym} ptr x mem) 17769 for { 17770 off := v.AuxInt 17771 sym := v.Aux 17772 mem := v.Args[2] 17773 ptr := v.Args[0] 17774 y := v.Args[1] 17775 if y.Op != OpAMD64ADDQ { 17776 break 17777 } 17778 _ = y.Args[1] 17779 x := y.Args[0] 17780 l := y.Args[1] 17781 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17782 break 17783 } 17784 _ = l.Args[1] 17785 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17786 break 17787 } 17788 v.reset(OpAMD64ADDQmodify) 17789 v.AuxInt = off 17790 v.Aux = sym 17791 v.AddArg(ptr) 17792 v.AddArg(x) 17793 v.AddArg(mem) 17794 return true 17795 } 17796 // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem) 17797 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17798 // result: (SUBQmodify [off] {sym} ptr x mem) 17799 for { 17800 off := v.AuxInt 17801 sym := v.Aux 17802 mem := v.Args[2] 17803 ptr := v.Args[0] 17804 y := v.Args[1] 17805 if y.Op != OpAMD64SUBQ { 17806 break 17807 } 17808 x := y.Args[1] 17809 l := y.Args[0] 17810 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17811 break 17812 } 17813 _ = l.Args[1] 17814 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17815 break 17816 } 17817 v.reset(OpAMD64SUBQmodify) 17818 v.AuxInt = off 17819 v.Aux = sym 17820 v.AddArg(ptr) 17821 v.AddArg(x) 17822 v.AddArg(mem) 17823 return true 17824 } 17825 // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) 17826 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17827 // result: (ANDQmodify [off] {sym} ptr x mem) 17828 for { 17829 off := v.AuxInt 17830 sym := v.Aux 17831 mem := v.Args[2] 17832 ptr := v.Args[0] 17833 y := v.Args[1] 17834 if y.Op != OpAMD64ANDQ { 17835 break 17836 } 17837 x := y.Args[1] 17838 l := y.Args[0] 17839 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17840 break 17841 } 17842 _ = l.Args[1] 17843 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17844 break 17845 } 17846 v.reset(OpAMD64ANDQmodify) 17847 v.AuxInt = off 17848 v.Aux = sym 17849 v.AddArg(ptr) 17850 v.AddArg(x) 17851 v.AddArg(mem) 17852 return true 
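// Note (editor): the rules in this function fuse load/op/store on the same
// address into one read-modify-write instruction, e.g. MOVQload + ADDQ +
// MOVQstore collapses to ADDQmodify (an ADDQ with a memory destination).
// The y.Uses==1 && l.Uses==1 guards ensure the intermediate values have no
// other consumers, and clobber marks them dead for removal. Commutative ops
// (ADDQ, ANDQ, ORQ, XORQ) each need two mirrored rules, one per operand
// order. Roughly, Go source such as
//
//	func bump(p *uint64, x uint64) { *p += x }
//
// can then compile to a single `ADDQ x, (p)` rather than a load, an add,
// and a store. (Illustrative; exact codegen depends on surrounding rules.)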
17853 } 17854 // match: (MOVQstore {sym} [off] ptr y:(ANDQ x l:(MOVQload [off] {sym} ptr mem)) mem) 17855 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17856 // result: (ANDQmodify [off] {sym} ptr x mem) 17857 for { 17858 off := v.AuxInt 17859 sym := v.Aux 17860 mem := v.Args[2] 17861 ptr := v.Args[0] 17862 y := v.Args[1] 17863 if y.Op != OpAMD64ANDQ { 17864 break 17865 } 17866 _ = y.Args[1] 17867 x := y.Args[0] 17868 l := y.Args[1] 17869 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17870 break 17871 } 17872 _ = l.Args[1] 17873 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17874 break 17875 } 17876 v.reset(OpAMD64ANDQmodify) 17877 v.AuxInt = off 17878 v.Aux = sym 17879 v.AddArg(ptr) 17880 v.AddArg(x) 17881 v.AddArg(mem) 17882 return true 17883 } 17884 // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem) 17885 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17886 // result: (ORQmodify [off] {sym} ptr x mem) 17887 for { 17888 off := v.AuxInt 17889 sym := v.Aux 17890 mem := v.Args[2] 17891 ptr := v.Args[0] 17892 y := v.Args[1] 17893 if y.Op != OpAMD64ORQ { 17894 break 17895 } 17896 x := y.Args[1] 17897 l := y.Args[0] 17898 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17899 break 17900 } 17901 _ = l.Args[1] 17902 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17903 break 17904 } 17905 v.reset(OpAMD64ORQmodify) 17906 v.AuxInt = off 17907 v.Aux = sym 17908 v.AddArg(ptr) 17909 v.AddArg(x) 17910 v.AddArg(mem) 17911 return true 17912 } 17913 // match: (MOVQstore {sym} [off] ptr y:(ORQ x l:(MOVQload [off] {sym} ptr mem)) mem) 17914 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17915 // result: (ORQmodify [off] {sym} ptr x mem) 17916 for { 17917 off := v.AuxInt 17918 sym := v.Aux 17919 mem := v.Args[2] 17920 ptr := v.Args[0] 17921 y := v.Args[1] 17922 if y.Op != OpAMD64ORQ { 17923 break 17924 } 17925 _ = y.Args[1] 17926 x := y.Args[0] 17927 l := y.Args[1] 17928 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17929 break 17930 } 17931 _ = l.Args[1] 17932 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17933 break 17934 } 17935 v.reset(OpAMD64ORQmodify) 17936 v.AuxInt = off 17937 v.Aux = sym 17938 v.AddArg(ptr) 17939 v.AddArg(x) 17940 v.AddArg(mem) 17941 return true 17942 } 17943 // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem) 17944 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 17945 // result: (XORQmodify [off] {sym} ptr x mem) 17946 for { 17947 off := v.AuxInt 17948 sym := v.Aux 17949 mem := v.Args[2] 17950 ptr := v.Args[0] 17951 y := v.Args[1] 17952 if y.Op != OpAMD64XORQ { 17953 break 17954 } 17955 x := y.Args[1] 17956 l := y.Args[0] 17957 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17958 break 17959 } 17960 _ = l.Args[1] 17961 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17962 break 17963 } 17964 v.reset(OpAMD64XORQmodify) 17965 v.AuxInt = off 17966 v.Aux = sym 17967 v.AddArg(ptr) 17968 v.AddArg(x) 17969 v.AddArg(mem) 17970 return true 17971 } 17972 return false 17973 } 17974 func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { 17975 // match: (MOVQstore {sym} [off] ptr y:(XORQ x l:(MOVQload [off] {sym} ptr mem)) mem) 17976 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && 
clobber(l) 17977 // result: (XORQmodify [off] {sym} ptr x mem) 17978 for { 17979 off := v.AuxInt 17980 sym := v.Aux 17981 mem := v.Args[2] 17982 ptr := v.Args[0] 17983 y := v.Args[1] 17984 if y.Op != OpAMD64XORQ { 17985 break 17986 } 17987 _ = y.Args[1] 17988 x := y.Args[0] 17989 l := y.Args[1] 17990 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 17991 break 17992 } 17993 _ = l.Args[1] 17994 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 17995 break 17996 } 17997 v.reset(OpAMD64XORQmodify) 17998 v.AuxInt = off 17999 v.Aux = sym 18000 v.AddArg(ptr) 18001 v.AddArg(x) 18002 v.AddArg(mem) 18003 return true 18004 } 18005 // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem) 18006 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 18007 // result: (BTCQmodify [off] {sym} ptr x mem) 18008 for { 18009 off := v.AuxInt 18010 sym := v.Aux 18011 mem := v.Args[2] 18012 ptr := v.Args[0] 18013 y := v.Args[1] 18014 if y.Op != OpAMD64BTCQ { 18015 break 18016 } 18017 x := y.Args[1] 18018 l := y.Args[0] 18019 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18020 break 18021 } 18022 _ = l.Args[1] 18023 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 18024 break 18025 } 18026 v.reset(OpAMD64BTCQmodify) 18027 v.AuxInt = off 18028 v.Aux = sym 18029 v.AddArg(ptr) 18030 v.AddArg(x) 18031 v.AddArg(mem) 18032 return true 18033 } 18034 // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem) 18035 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 18036 // result: (BTRQmodify [off] {sym} ptr x mem) 18037 for { 18038 off := v.AuxInt 18039 sym := v.Aux 18040 mem := v.Args[2] 18041 ptr := v.Args[0] 18042 y := v.Args[1] 18043 if y.Op != OpAMD64BTRQ { 18044 break 18045 } 18046 x := y.Args[1] 18047 l := y.Args[0] 18048 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18049 break 18050 } 18051 _ = l.Args[1] 18052 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 18053 break 18054 } 18055 v.reset(OpAMD64BTRQmodify) 18056 v.AuxInt = off 18057 v.Aux = sym 18058 v.AddArg(ptr) 18059 v.AddArg(x) 18060 v.AddArg(mem) 18061 return true 18062 } 18063 // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem) 18064 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) 18065 // result: (BTSQmodify [off] {sym} ptr x mem) 18066 for { 18067 off := v.AuxInt 18068 sym := v.Aux 18069 mem := v.Args[2] 18070 ptr := v.Args[0] 18071 y := v.Args[1] 18072 if y.Op != OpAMD64BTSQ { 18073 break 18074 } 18075 x := y.Args[1] 18076 l := y.Args[0] 18077 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18078 break 18079 } 18080 _ = l.Args[1] 18081 if ptr != l.Args[0] || mem != l.Args[1] || !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { 18082 break 18083 } 18084 v.reset(OpAMD64BTSQmodify) 18085 v.AuxInt = off 18086 v.Aux = sym 18087 v.AddArg(ptr) 18088 v.AddArg(x) 18089 v.AddArg(mem) 18090 return true 18091 } 18092 // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 18093 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 18094 // result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 18095 for { 18096 off := v.AuxInt 18097 sym := v.Aux 18098 mem := v.Args[2] 18099 ptr := v.Args[0] 18100 a := 
v.Args[1] 18101 if a.Op != OpAMD64ADDQconst { 18102 break 18103 } 18104 c := a.AuxInt 18105 l := a.Args[0] 18106 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18107 break 18108 } 18109 _ = l.Args[1] 18110 ptr2 := l.Args[0] 18111 if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 18112 break 18113 } 18114 v.reset(OpAMD64ADDQconstmodify) 18115 v.AuxInt = makeValAndOff(c, off) 18116 v.Aux = sym 18117 v.AddArg(ptr) 18118 v.AddArg(mem) 18119 return true 18120 } 18121 // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 18122 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 18123 // result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 18124 for { 18125 off := v.AuxInt 18126 sym := v.Aux 18127 mem := v.Args[2] 18128 ptr := v.Args[0] 18129 a := v.Args[1] 18130 if a.Op != OpAMD64ANDQconst { 18131 break 18132 } 18133 c := a.AuxInt 18134 l := a.Args[0] 18135 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18136 break 18137 } 18138 _ = l.Args[1] 18139 ptr2 := l.Args[0] 18140 if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 18141 break 18142 } 18143 v.reset(OpAMD64ANDQconstmodify) 18144 v.AuxInt = makeValAndOff(c, off) 18145 v.Aux = sym 18146 v.AddArg(ptr) 18147 v.AddArg(mem) 18148 return true 18149 } 18150 // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 18151 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 18152 // result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 18153 for { 18154 off := v.AuxInt 18155 sym := v.Aux 18156 mem := v.Args[2] 18157 ptr := v.Args[0] 18158 a := v.Args[1] 18159 if a.Op != OpAMD64ORQconst { 18160 break 18161 } 18162 c := a.AuxInt 18163 l := a.Args[0] 18164 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18165 break 18166 } 18167 _ = l.Args[1] 18168 ptr2 := l.Args[0] 18169 if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 18170 break 18171 } 18172 v.reset(OpAMD64ORQconstmodify) 18173 v.AuxInt = makeValAndOff(c, off) 18174 v.Aux = sym 18175 v.AddArg(ptr) 18176 v.AddArg(mem) 18177 return true 18178 } 18179 // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 18180 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 18181 // result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 18182 for { 18183 off := v.AuxInt 18184 sym := v.Aux 18185 mem := v.Args[2] 18186 ptr := v.Args[0] 18187 a := v.Args[1] 18188 if a.Op != OpAMD64XORQconst { 18189 break 18190 } 18191 c := a.AuxInt 18192 l := a.Args[0] 18193 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18194 break 18195 } 18196 _ = l.Args[1] 18197 ptr2 := l.Args[0] 18198 if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 18199 break 18200 } 18201 v.reset(OpAMD64XORQconstmodify) 18202 v.AuxInt = makeValAndOff(c, off) 18203 v.Aux = sym 18204 v.AddArg(ptr) 18205 v.AddArg(mem) 18206 return true 18207 } 18208 // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 18209 // 
cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 18210 // result: (BTCQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 18211 for { 18212 off := v.AuxInt 18213 sym := v.Aux 18214 mem := v.Args[2] 18215 ptr := v.Args[0] 18216 a := v.Args[1] 18217 if a.Op != OpAMD64BTCQconst { 18218 break 18219 } 18220 c := a.AuxInt 18221 l := a.Args[0] 18222 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18223 break 18224 } 18225 _ = l.Args[1] 18226 ptr2 := l.Args[0] 18227 if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 18228 break 18229 } 18230 v.reset(OpAMD64BTCQconstmodify) 18231 v.AuxInt = makeValAndOff(c, off) 18232 v.Aux = sym 18233 v.AddArg(ptr) 18234 v.AddArg(mem) 18235 return true 18236 } 18237 // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 18238 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 18239 // result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 18240 for { 18241 off := v.AuxInt 18242 sym := v.Aux 18243 mem := v.Args[2] 18244 ptr := v.Args[0] 18245 a := v.Args[1] 18246 if a.Op != OpAMD64BTRQconst { 18247 break 18248 } 18249 c := a.AuxInt 18250 l := a.Args[0] 18251 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18252 break 18253 } 18254 _ = l.Args[1] 18255 ptr2 := l.Args[0] 18256 if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 18257 break 18258 } 18259 v.reset(OpAMD64BTRQconstmodify) 18260 v.AuxInt = makeValAndOff(c, off) 18261 v.Aux = sym 18262 v.AddArg(ptr) 18263 v.AddArg(mem) 18264 return true 18265 } 18266 return false 18267 } 18268 func rewriteValueAMD64_OpAMD64MOVQstore_30(v *Value) bool { 18269 // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) 18270 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) 18271 // result: (BTSQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) 18272 for { 18273 off := v.AuxInt 18274 sym := v.Aux 18275 mem := v.Args[2] 18276 ptr := v.Args[0] 18277 a := v.Args[1] 18278 if a.Op != OpAMD64BTSQconst { 18279 break 18280 } 18281 c := a.AuxInt 18282 l := a.Args[0] 18283 if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { 18284 break 18285 } 18286 _ = l.Args[1] 18287 ptr2 := l.Args[0] 18288 if mem != l.Args[1] || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { 18289 break 18290 } 18291 v.reset(OpAMD64BTSQconstmodify) 18292 v.AuxInt = makeValAndOff(c, off) 18293 v.Aux = sym 18294 v.AddArg(ptr) 18295 v.AddArg(mem) 18296 return true 18297 } 18298 // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) 18299 // result: (MOVSDstore [off] {sym} ptr val mem) 18300 for { 18301 off := v.AuxInt 18302 sym := v.Aux 18303 mem := v.Args[2] 18304 ptr := v.Args[0] 18305 v_1 := v.Args[1] 18306 if v_1.Op != OpAMD64MOVQf2i { 18307 break 18308 } 18309 val := v_1.Args[0] 18310 v.reset(OpAMD64MOVSDstore) 18311 v.AuxInt = off 18312 v.Aux = sym 18313 v.AddArg(ptr) 18314 v.AddArg(val) 18315 v.AddArg(mem) 18316 return true 18317 } 18318 return false 18319 } 18320 func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { 18321 b := v.Block 18322 config := b.Func.Config 18323 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] 
ptr) mem) 18324 // cond: ValAndOff(sc).canAdd(off) 18325 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 18326 for { 18327 sc := v.AuxInt 18328 s := v.Aux 18329 mem := v.Args[1] 18330 v_0 := v.Args[0] 18331 if v_0.Op != OpAMD64ADDQconst { 18332 break 18333 } 18334 off := v_0.AuxInt 18335 ptr := v_0.Args[0] 18336 if !(ValAndOff(sc).canAdd(off)) { 18337 break 18338 } 18339 v.reset(OpAMD64MOVQstoreconst) 18340 v.AuxInt = ValAndOff(sc).add(off) 18341 v.Aux = s 18342 v.AddArg(ptr) 18343 v.AddArg(mem) 18344 return true 18345 } 18346 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 18347 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 18348 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 18349 for { 18350 sc := v.AuxInt 18351 sym1 := v.Aux 18352 mem := v.Args[1] 18353 v_0 := v.Args[0] 18354 if v_0.Op != OpAMD64LEAQ { 18355 break 18356 } 18357 off := v_0.AuxInt 18358 sym2 := v_0.Aux 18359 ptr := v_0.Args[0] 18360 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 18361 break 18362 } 18363 v.reset(OpAMD64MOVQstoreconst) 18364 v.AuxInt = ValAndOff(sc).add(off) 18365 v.Aux = mergeSym(sym1, sym2) 18366 v.AddArg(ptr) 18367 v.AddArg(mem) 18368 return true 18369 } 18370 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 18371 // cond: canMergeSym(sym1, sym2) 18372 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 18373 for { 18374 x := v.AuxInt 18375 sym1 := v.Aux 18376 mem := v.Args[1] 18377 v_0 := v.Args[0] 18378 if v_0.Op != OpAMD64LEAQ1 { 18379 break 18380 } 18381 off := v_0.AuxInt 18382 sym2 := v_0.Aux 18383 idx := v_0.Args[1] 18384 ptr := v_0.Args[0] 18385 if !(canMergeSym(sym1, sym2)) { 18386 break 18387 } 18388 v.reset(OpAMD64MOVQstoreconstidx1) 18389 v.AuxInt = ValAndOff(x).add(off) 18390 v.Aux = mergeSym(sym1, sym2) 18391 v.AddArg(ptr) 18392 v.AddArg(idx) 18393 v.AddArg(mem) 18394 return true 18395 } 18396 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 18397 // cond: canMergeSym(sym1, sym2) 18398 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 18399 for { 18400 x := v.AuxInt 18401 sym1 := v.Aux 18402 mem := v.Args[1] 18403 v_0 := v.Args[0] 18404 if v_0.Op != OpAMD64LEAQ8 { 18405 break 18406 } 18407 off := v_0.AuxInt 18408 sym2 := v_0.Aux 18409 idx := v_0.Args[1] 18410 ptr := v_0.Args[0] 18411 if !(canMergeSym(sym1, sym2)) { 18412 break 18413 } 18414 v.reset(OpAMD64MOVQstoreconstidx8) 18415 v.AuxInt = ValAndOff(x).add(off) 18416 v.Aux = mergeSym(sym1, sym2) 18417 v.AddArg(ptr) 18418 v.AddArg(idx) 18419 v.AddArg(mem) 18420 return true 18421 } 18422 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 18423 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 18424 for { 18425 x := v.AuxInt 18426 sym := v.Aux 18427 mem := v.Args[1] 18428 v_0 := v.Args[0] 18429 if v_0.Op != OpAMD64ADDQ { 18430 break 18431 } 18432 idx := v_0.Args[1] 18433 ptr := v_0.Args[0] 18434 v.reset(OpAMD64MOVQstoreconstidx1) 18435 v.AuxInt = x 18436 v.Aux = sym 18437 v.AddArg(ptr) 18438 v.AddArg(idx) 18439 v.AddArg(mem) 18440 return true 18441 } 18442 // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) 18443 // cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x) 18444 // result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem) 18445 for { 18446 c := v.AuxInt 18447 s := v.Aux 
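// Note (editor): this rule pairs two adjacent 8-byte zero stores (same base
// and symbol, offsets exactly 8 apart, both ValAndOff values zero) into one
// 16-byte SSE store of a zeroed XMM register, provided config.useSSE is set
// and the older store has no other uses. The merged MOVOstore takes the
// lower of the two offsets. A sketch of Go source that can produce this
// shape, assuming the two stores are emitted back to back:
//
//	var a [2]uint64
//	a[0], a[1] = 0, 0 // two MOVQstoreconst $0 -> one MOVOstore of (MOVOconst [0])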
18448 _ = v.Args[1] 18449 p := v.Args[0] 18450 x := v.Args[1] 18451 if x.Op != OpAMD64MOVQstoreconst { 18452 break 18453 } 18454 c2 := x.AuxInt 18455 if x.Aux != s { 18456 break 18457 } 18458 mem := x.Args[1] 18459 if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) { 18460 break 18461 } 18462 v.reset(OpAMD64MOVOstore) 18463 v.AuxInt = ValAndOff(c2).Off() 18464 v.Aux = s 18465 v.AddArg(p) 18466 v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128) 18467 v0.AuxInt = 0 18468 v.AddArg(v0) 18469 v.AddArg(mem) 18470 return true 18471 } 18472 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 18473 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 18474 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 18475 for { 18476 sc := v.AuxInt 18477 sym1 := v.Aux 18478 mem := v.Args[1] 18479 v_0 := v.Args[0] 18480 if v_0.Op != OpAMD64LEAL { 18481 break 18482 } 18483 off := v_0.AuxInt 18484 sym2 := v_0.Aux 18485 ptr := v_0.Args[0] 18486 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 18487 break 18488 } 18489 v.reset(OpAMD64MOVQstoreconst) 18490 v.AuxInt = ValAndOff(sc).add(off) 18491 v.Aux = mergeSym(sym1, sym2) 18492 v.AddArg(ptr) 18493 v.AddArg(mem) 18494 return true 18495 } 18496 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 18497 // cond: ValAndOff(sc).canAdd(off) 18498 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 18499 for { 18500 sc := v.AuxInt 18501 s := v.Aux 18502 mem := v.Args[1] 18503 v_0 := v.Args[0] 18504 if v_0.Op != OpAMD64ADDLconst { 18505 break 18506 } 18507 off := v_0.AuxInt 18508 ptr := v_0.Args[0] 18509 if !(ValAndOff(sc).canAdd(off)) { 18510 break 18511 } 18512 v.reset(OpAMD64MOVQstoreconst) 18513 v.AuxInt = ValAndOff(sc).add(off) 18514 v.Aux = s 18515 v.AddArg(ptr) 18516 v.AddArg(mem) 18517 return true 18518 } 18519 return false 18520 } 18521 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { 18522 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 18523 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 18524 for { 18525 c := v.AuxInt 18526 sym := v.Aux 18527 mem := v.Args[2] 18528 ptr := v.Args[0] 18529 v_1 := v.Args[1] 18530 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { 18531 break 18532 } 18533 idx := v_1.Args[0] 18534 v.reset(OpAMD64MOVQstoreconstidx8) 18535 v.AuxInt = c 18536 v.Aux = sym 18537 v.AddArg(ptr) 18538 v.AddArg(idx) 18539 v.AddArg(mem) 18540 return true 18541 } 18542 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 18543 // cond: ValAndOff(x).canAdd(c) 18544 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 18545 for { 18546 x := v.AuxInt 18547 sym := v.Aux 18548 mem := v.Args[2] 18549 v_0 := v.Args[0] 18550 if v_0.Op != OpAMD64ADDQconst { 18551 break 18552 } 18553 c := v_0.AuxInt 18554 ptr := v_0.Args[0] 18555 idx := v.Args[1] 18556 if !(ValAndOff(x).canAdd(c)) { 18557 break 18558 } 18559 v.reset(OpAMD64MOVQstoreconstidx1) 18560 v.AuxInt = ValAndOff(x).add(c) 18561 v.Aux = sym 18562 v.AddArg(ptr) 18563 v.AddArg(idx) 18564 v.AddArg(mem) 18565 return true 18566 } 18567 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 18568 // cond: ValAndOff(x).canAdd(c) 18569 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 18570 for { 18571 x := v.AuxInt 18572 sym := v.Aux 18573 mem := v.Args[2] 18574 ptr := v.Args[0] 18575 
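// Note (editor): folding an ADDQconst off the index of a scaled address must
// scale the constant, because the hardware computes ptr + scale*idx + disp:
// this idx1 form adds c to the displacement unchanged, while the idx8 rules
// that follow add 8*c (e.g. rewriting idx+2 with 8-byte elements moves the
// displacement by 16). The ValAndOff(x).canAdd(c) guard rejects folds whose
// combined offset would no longer fit the signed 32-bit displacement field.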
v_1 := v.Args[1] 18576 if v_1.Op != OpAMD64ADDQconst { 18577 break 18578 } 18579 c := v_1.AuxInt 18580 idx := v_1.Args[0] 18581 if !(ValAndOff(x).canAdd(c)) { 18582 break 18583 } 18584 v.reset(OpAMD64MOVQstoreconstidx1) 18585 v.AuxInt = ValAndOff(x).add(c) 18586 v.Aux = sym 18587 v.AddArg(ptr) 18588 v.AddArg(idx) 18589 v.AddArg(mem) 18590 return true 18591 } 18592 return false 18593 } 18594 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { 18595 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 18596 // cond: ValAndOff(x).canAdd(c) 18597 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 18598 for { 18599 x := v.AuxInt 18600 sym := v.Aux 18601 mem := v.Args[2] 18602 v_0 := v.Args[0] 18603 if v_0.Op != OpAMD64ADDQconst { 18604 break 18605 } 18606 c := v_0.AuxInt 18607 ptr := v_0.Args[0] 18608 idx := v.Args[1] 18609 if !(ValAndOff(x).canAdd(c)) { 18610 break 18611 } 18612 v.reset(OpAMD64MOVQstoreconstidx8) 18613 v.AuxInt = ValAndOff(x).add(c) 18614 v.Aux = sym 18615 v.AddArg(ptr) 18616 v.AddArg(idx) 18617 v.AddArg(mem) 18618 return true 18619 } 18620 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 18621 // cond: ValAndOff(x).canAdd(8*c) 18622 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 18623 for { 18624 x := v.AuxInt 18625 sym := v.Aux 18626 mem := v.Args[2] 18627 ptr := v.Args[0] 18628 v_1 := v.Args[1] 18629 if v_1.Op != OpAMD64ADDQconst { 18630 break 18631 } 18632 c := v_1.AuxInt 18633 idx := v_1.Args[0] 18634 if !(ValAndOff(x).canAdd(8 * c)) { 18635 break 18636 } 18637 v.reset(OpAMD64MOVQstoreconstidx8) 18638 v.AuxInt = ValAndOff(x).add(8 * c) 18639 v.Aux = sym 18640 v.AddArg(ptr) 18641 v.AddArg(idx) 18642 v.AddArg(mem) 18643 return true 18644 } 18645 return false 18646 } 18647 func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { 18648 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 18649 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 18650 for { 18651 c := v.AuxInt 18652 sym := v.Aux 18653 mem := v.Args[3] 18654 ptr := v.Args[0] 18655 v_1 := v.Args[1] 18656 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { 18657 break 18658 } 18659 idx := v_1.Args[0] 18660 val := v.Args[2] 18661 v.reset(OpAMD64MOVQstoreidx8) 18662 v.AuxInt = c 18663 v.Aux = sym 18664 v.AddArg(ptr) 18665 v.AddArg(idx) 18666 v.AddArg(val) 18667 v.AddArg(mem) 18668 return true 18669 } 18670 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 18671 // cond: is32Bit(c+d) 18672 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 18673 for { 18674 c := v.AuxInt 18675 sym := v.Aux 18676 mem := v.Args[3] 18677 v_0 := v.Args[0] 18678 if v_0.Op != OpAMD64ADDQconst { 18679 break 18680 } 18681 d := v_0.AuxInt 18682 ptr := v_0.Args[0] 18683 idx := v.Args[1] 18684 val := v.Args[2] 18685 if !(is32Bit(c + d)) { 18686 break 18687 } 18688 v.reset(OpAMD64MOVQstoreidx1) 18689 v.AuxInt = c + d 18690 v.Aux = sym 18691 v.AddArg(ptr) 18692 v.AddArg(idx) 18693 v.AddArg(val) 18694 v.AddArg(mem) 18695 return true 18696 } 18697 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 18698 // cond: is32Bit(c+d) 18699 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 18700 for { 18701 c := v.AuxInt 18702 sym := v.Aux 18703 mem := v.Args[3] 18704 ptr := v.Args[0] 18705 v_1 := v.Args[1] 18706 if v_1.Op != OpAMD64ADDQconst { 18707 break 18708 } 18709 d := v_1.AuxInt 18710 idx := v_1.Args[0] 18711 val := v.Args[2] 18712 if !(is32Bit(c + d)) { 18713 break 18714 } 18715 
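// Note (editor): the first rule of this function recognizes an index that
// was pre-shifted left by 3 (multiplied by 8) and folds the shift into the
// addressing mode: MOVQstoreidx1 with (SHLQconst [3] idx) becomes
// MOVQstoreidx8 on the raw idx. This is what lets a slice store such as
//
//	s[i] = v // s []uint64; address is &s[0] + 8*i
//
// use a single MOVQ v, (ptr)(idx*8) instead of computing 8*i separately.
// (Illustrative; the exact instruction also depends on later lowering.)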
v.reset(OpAMD64MOVQstoreidx1) 18716 v.AuxInt = c + d 18717 v.Aux = sym 18718 v.AddArg(ptr) 18719 v.AddArg(idx) 18720 v.AddArg(val) 18721 v.AddArg(mem) 18722 return true 18723 } 18724 // match: (MOVQstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 18725 // cond: is32Bit(i+c) 18726 // result: (MOVQstore [i+c] {s} p w mem) 18727 for { 18728 i := v.AuxInt 18729 s := v.Aux 18730 mem := v.Args[3] 18731 p := v.Args[0] 18732 v_1 := v.Args[1] 18733 if v_1.Op != OpAMD64MOVQconst { 18734 break 18735 } 18736 c := v_1.AuxInt 18737 w := v.Args[2] 18738 if !(is32Bit(i + c)) { 18739 break 18740 } 18741 v.reset(OpAMD64MOVQstore) 18742 v.AuxInt = i + c 18743 v.Aux = s 18744 v.AddArg(p) 18745 v.AddArg(w) 18746 v.AddArg(mem) 18747 return true 18748 } 18749 return false 18750 } 18751 func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { 18752 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 18753 // cond: is32Bit(c+d) 18754 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 18755 for { 18756 c := v.AuxInt 18757 sym := v.Aux 18758 mem := v.Args[3] 18759 v_0 := v.Args[0] 18760 if v_0.Op != OpAMD64ADDQconst { 18761 break 18762 } 18763 d := v_0.AuxInt 18764 ptr := v_0.Args[0] 18765 idx := v.Args[1] 18766 val := v.Args[2] 18767 if !(is32Bit(c + d)) { 18768 break 18769 } 18770 v.reset(OpAMD64MOVQstoreidx8) 18771 v.AuxInt = c + d 18772 v.Aux = sym 18773 v.AddArg(ptr) 18774 v.AddArg(idx) 18775 v.AddArg(val) 18776 v.AddArg(mem) 18777 return true 18778 } 18779 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 18780 // cond: is32Bit(c+8*d) 18781 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 18782 for { 18783 c := v.AuxInt 18784 sym := v.Aux 18785 mem := v.Args[3] 18786 ptr := v.Args[0] 18787 v_1 := v.Args[1] 18788 if v_1.Op != OpAMD64ADDQconst { 18789 break 18790 } 18791 d := v_1.AuxInt 18792 idx := v_1.Args[0] 18793 val := v.Args[2] 18794 if !(is32Bit(c + 8*d)) { 18795 break 18796 } 18797 v.reset(OpAMD64MOVQstoreidx8) 18798 v.AuxInt = c + 8*d 18799 v.Aux = sym 18800 v.AddArg(ptr) 18801 v.AddArg(idx) 18802 v.AddArg(val) 18803 v.AddArg(mem) 18804 return true 18805 } 18806 // match: (MOVQstoreidx8 [i] {s} p (MOVQconst [c]) w mem) 18807 // cond: is32Bit(i+8*c) 18808 // result: (MOVQstore [i+8*c] {s} p w mem) 18809 for { 18810 i := v.AuxInt 18811 s := v.Aux 18812 mem := v.Args[3] 18813 p := v.Args[0] 18814 v_1 := v.Args[1] 18815 if v_1.Op != OpAMD64MOVQconst { 18816 break 18817 } 18818 c := v_1.AuxInt 18819 w := v.Args[2] 18820 if !(is32Bit(i + 8*c)) { 18821 break 18822 } 18823 v.reset(OpAMD64MOVQstore) 18824 v.AuxInt = i + 8*c 18825 v.Aux = s 18826 v.AddArg(p) 18827 v.AddArg(w) 18828 v.AddArg(mem) 18829 return true 18830 } 18831 return false 18832 } 18833 func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { 18834 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 18835 // cond: is32Bit(off1+off2) 18836 // result: (MOVSDload [off1+off2] {sym} ptr mem) 18837 for { 18838 off1 := v.AuxInt 18839 sym := v.Aux 18840 mem := v.Args[1] 18841 v_0 := v.Args[0] 18842 if v_0.Op != OpAMD64ADDQconst { 18843 break 18844 } 18845 off2 := v_0.AuxInt 18846 ptr := v_0.Args[0] 18847 if !(is32Bit(off1 + off2)) { 18848 break 18849 } 18850 v.reset(OpAMD64MOVSDload) 18851 v.AuxInt = off1 + off2 18852 v.Aux = sym 18853 v.AddArg(ptr) 18854 v.AddArg(mem) 18855 return true 18856 } 18857 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 18858 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18859 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 
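// Note (editor): rules of this shape fold an address computed by LEAQ into
// the load itself. canMergeSym permits the fold only when at most one of
// sym1 and sym2 is non-nil (an offset cannot be relative to two symbols),
// and mergeSym keeps whichever one is set; is32Bit again protects the 32-bit
// displacement. For example, a load through (LEAQ {sym} SB) becomes a single
// MOVSD sym+off(SB) instruction.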
18860 for { 18861 off1 := v.AuxInt 18862 sym1 := v.Aux 18863 mem := v.Args[1] 18864 v_0 := v.Args[0] 18865 if v_0.Op != OpAMD64LEAQ { 18866 break 18867 } 18868 off2 := v_0.AuxInt 18869 sym2 := v_0.Aux 18870 base := v_0.Args[0] 18871 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18872 break 18873 } 18874 v.reset(OpAMD64MOVSDload) 18875 v.AuxInt = off1 + off2 18876 v.Aux = mergeSym(sym1, sym2) 18877 v.AddArg(base) 18878 v.AddArg(mem) 18879 return true 18880 } 18881 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 18882 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18883 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 18884 for { 18885 off1 := v.AuxInt 18886 sym1 := v.Aux 18887 mem := v.Args[1] 18888 v_0 := v.Args[0] 18889 if v_0.Op != OpAMD64LEAQ1 { 18890 break 18891 } 18892 off2 := v_0.AuxInt 18893 sym2 := v_0.Aux 18894 idx := v_0.Args[1] 18895 ptr := v_0.Args[0] 18896 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18897 break 18898 } 18899 v.reset(OpAMD64MOVSDloadidx1) 18900 v.AuxInt = off1 + off2 18901 v.Aux = mergeSym(sym1, sym2) 18902 v.AddArg(ptr) 18903 v.AddArg(idx) 18904 v.AddArg(mem) 18905 return true 18906 } 18907 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 18908 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 18909 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 18910 for { 18911 off1 := v.AuxInt 18912 sym1 := v.Aux 18913 mem := v.Args[1] 18914 v_0 := v.Args[0] 18915 if v_0.Op != OpAMD64LEAQ8 { 18916 break 18917 } 18918 off2 := v_0.AuxInt 18919 sym2 := v_0.Aux 18920 idx := v_0.Args[1] 18921 ptr := v_0.Args[0] 18922 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 18923 break 18924 } 18925 v.reset(OpAMD64MOVSDloadidx8) 18926 v.AuxInt = off1 + off2 18927 v.Aux = mergeSym(sym1, sym2) 18928 v.AddArg(ptr) 18929 v.AddArg(idx) 18930 v.AddArg(mem) 18931 return true 18932 } 18933 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 18934 // cond: ptr.Op != OpSB 18935 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 18936 for { 18937 off := v.AuxInt 18938 sym := v.Aux 18939 mem := v.Args[1] 18940 v_0 := v.Args[0] 18941 if v_0.Op != OpAMD64ADDQ { 18942 break 18943 } 18944 idx := v_0.Args[1] 18945 ptr := v_0.Args[0] 18946 if !(ptr.Op != OpSB) { 18947 break 18948 } 18949 v.reset(OpAMD64MOVSDloadidx1) 18950 v.AuxInt = off 18951 v.Aux = sym 18952 v.AddArg(ptr) 18953 v.AddArg(idx) 18954 v.AddArg(mem) 18955 return true 18956 } 18957 // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) 18958 // result: (MOVQi2f val) 18959 for { 18960 off := v.AuxInt 18961 sym := v.Aux 18962 _ = v.Args[1] 18963 ptr := v.Args[0] 18964 v_1 := v.Args[1] 18965 if v_1.Op != OpAMD64MOVQstore || v_1.AuxInt != off || v_1.Aux != sym { 18966 break 18967 } 18968 _ = v_1.Args[2] 18969 if ptr != v_1.Args[0] { 18970 break 18971 } 18972 val := v_1.Args[1] 18973 v.reset(OpAMD64MOVQi2f) 18974 v.AddArg(val) 18975 return true 18976 } 18977 return false 18978 } 18979 func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { 18980 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 18981 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 18982 for { 18983 c := v.AuxInt 18984 sym := v.Aux 18985 mem := v.Args[2] 18986 ptr := v.Args[0] 18987 v_1 := v.Args[1] 18988 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { 18989 break 18990 } 18991 idx := v_1.Args[0] 18992 v.reset(OpAMD64MOVSDloadidx8) 18993 v.AuxInt = c 18994 v.Aux = sym 18995 v.AddArg(ptr) 18996 v.AddArg(idx) 
18997 v.AddArg(mem) 18998 return true 18999 } 19000 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 19001 // cond: is32Bit(c+d) 19002 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 19003 for { 19004 c := v.AuxInt 19005 sym := v.Aux 19006 mem := v.Args[2] 19007 v_0 := v.Args[0] 19008 if v_0.Op != OpAMD64ADDQconst { 19009 break 19010 } 19011 d := v_0.AuxInt 19012 ptr := v_0.Args[0] 19013 idx := v.Args[1] 19014 if !(is32Bit(c + d)) { 19015 break 19016 } 19017 v.reset(OpAMD64MOVSDloadidx1) 19018 v.AuxInt = c + d 19019 v.Aux = sym 19020 v.AddArg(ptr) 19021 v.AddArg(idx) 19022 v.AddArg(mem) 19023 return true 19024 } 19025 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 19026 // cond: is32Bit(c+d) 19027 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 19028 for { 19029 c := v.AuxInt 19030 sym := v.Aux 19031 mem := v.Args[2] 19032 ptr := v.Args[0] 19033 v_1 := v.Args[1] 19034 if v_1.Op != OpAMD64ADDQconst { 19035 break 19036 } 19037 d := v_1.AuxInt 19038 idx := v_1.Args[0] 19039 if !(is32Bit(c + d)) { 19040 break 19041 } 19042 v.reset(OpAMD64MOVSDloadidx1) 19043 v.AuxInt = c + d 19044 v.Aux = sym 19045 v.AddArg(ptr) 19046 v.AddArg(idx) 19047 v.AddArg(mem) 19048 return true 19049 } 19050 // match: (MOVSDloadidx1 [i] {s} p (MOVQconst [c]) mem) 19051 // cond: is32Bit(i+c) 19052 // result: (MOVSDload [i+c] {s} p mem) 19053 for { 19054 i := v.AuxInt 19055 s := v.Aux 19056 mem := v.Args[2] 19057 p := v.Args[0] 19058 v_1 := v.Args[1] 19059 if v_1.Op != OpAMD64MOVQconst { 19060 break 19061 } 19062 c := v_1.AuxInt 19063 if !(is32Bit(i + c)) { 19064 break 19065 } 19066 v.reset(OpAMD64MOVSDload) 19067 v.AuxInt = i + c 19068 v.Aux = s 19069 v.AddArg(p) 19070 v.AddArg(mem) 19071 return true 19072 } 19073 return false 19074 } 19075 func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { 19076 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 19077 // cond: is32Bit(c+d) 19078 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 19079 for { 19080 c := v.AuxInt 19081 sym := v.Aux 19082 mem := v.Args[2] 19083 v_0 := v.Args[0] 19084 if v_0.Op != OpAMD64ADDQconst { 19085 break 19086 } 19087 d := v_0.AuxInt 19088 ptr := v_0.Args[0] 19089 idx := v.Args[1] 19090 if !(is32Bit(c + d)) { 19091 break 19092 } 19093 v.reset(OpAMD64MOVSDloadidx8) 19094 v.AuxInt = c + d 19095 v.Aux = sym 19096 v.AddArg(ptr) 19097 v.AddArg(idx) 19098 v.AddArg(mem) 19099 return true 19100 } 19101 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 19102 // cond: is32Bit(c+8*d) 19103 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 19104 for { 19105 c := v.AuxInt 19106 sym := v.Aux 19107 mem := v.Args[2] 19108 ptr := v.Args[0] 19109 v_1 := v.Args[1] 19110 if v_1.Op != OpAMD64ADDQconst { 19111 break 19112 } 19113 d := v_1.AuxInt 19114 idx := v_1.Args[0] 19115 if !(is32Bit(c + 8*d)) { 19116 break 19117 } 19118 v.reset(OpAMD64MOVSDloadidx8) 19119 v.AuxInt = c + 8*d 19120 v.Aux = sym 19121 v.AddArg(ptr) 19122 v.AddArg(idx) 19123 v.AddArg(mem) 19124 return true 19125 } 19126 // match: (MOVSDloadidx8 [i] {s} p (MOVQconst [c]) mem) 19127 // cond: is32Bit(i+8*c) 19128 // result: (MOVSDload [i+8*c] {s} p mem) 19129 for { 19130 i := v.AuxInt 19131 s := v.Aux 19132 mem := v.Args[2] 19133 p := v.Args[0] 19134 v_1 := v.Args[1] 19135 if v_1.Op != OpAMD64MOVQconst { 19136 break 19137 } 19138 c := v_1.AuxInt 19139 if !(is32Bit(i + 8*c)) { 19140 break 19141 } 19142 v.reset(OpAMD64MOVSDload) 19143 v.AuxInt = i + 8*c 19144 v.Aux = s 19145 v.AddArg(p) 19146 v.AddArg(mem) 19147 return true 19148 
} 19149 return false 19150 } 19151 func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { 19152 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 19153 // cond: is32Bit(off1+off2) 19154 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 19155 for { 19156 off1 := v.AuxInt 19157 sym := v.Aux 19158 mem := v.Args[2] 19159 v_0 := v.Args[0] 19160 if v_0.Op != OpAMD64ADDQconst { 19161 break 19162 } 19163 off2 := v_0.AuxInt 19164 ptr := v_0.Args[0] 19165 val := v.Args[1] 19166 if !(is32Bit(off1 + off2)) { 19167 break 19168 } 19169 v.reset(OpAMD64MOVSDstore) 19170 v.AuxInt = off1 + off2 19171 v.Aux = sym 19172 v.AddArg(ptr) 19173 v.AddArg(val) 19174 v.AddArg(mem) 19175 return true 19176 } 19177 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 19178 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19179 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 19180 for { 19181 off1 := v.AuxInt 19182 sym1 := v.Aux 19183 mem := v.Args[2] 19184 v_0 := v.Args[0] 19185 if v_0.Op != OpAMD64LEAQ { 19186 break 19187 } 19188 off2 := v_0.AuxInt 19189 sym2 := v_0.Aux 19190 base := v_0.Args[0] 19191 val := v.Args[1] 19192 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19193 break 19194 } 19195 v.reset(OpAMD64MOVSDstore) 19196 v.AuxInt = off1 + off2 19197 v.Aux = mergeSym(sym1, sym2) 19198 v.AddArg(base) 19199 v.AddArg(val) 19200 v.AddArg(mem) 19201 return true 19202 } 19203 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 19204 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19205 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 19206 for { 19207 off1 := v.AuxInt 19208 sym1 := v.Aux 19209 mem := v.Args[2] 19210 v_0 := v.Args[0] 19211 if v_0.Op != OpAMD64LEAQ1 { 19212 break 19213 } 19214 off2 := v_0.AuxInt 19215 sym2 := v_0.Aux 19216 idx := v_0.Args[1] 19217 ptr := v_0.Args[0] 19218 val := v.Args[1] 19219 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19220 break 19221 } 19222 v.reset(OpAMD64MOVSDstoreidx1) 19223 v.AuxInt = off1 + off2 19224 v.Aux = mergeSym(sym1, sym2) 19225 v.AddArg(ptr) 19226 v.AddArg(idx) 19227 v.AddArg(val) 19228 v.AddArg(mem) 19229 return true 19230 } 19231 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 19232 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19233 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 19234 for { 19235 off1 := v.AuxInt 19236 sym1 := v.Aux 19237 mem := v.Args[2] 19238 v_0 := v.Args[0] 19239 if v_0.Op != OpAMD64LEAQ8 { 19240 break 19241 } 19242 off2 := v_0.AuxInt 19243 sym2 := v_0.Aux 19244 idx := v_0.Args[1] 19245 ptr := v_0.Args[0] 19246 val := v.Args[1] 19247 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19248 break 19249 } 19250 v.reset(OpAMD64MOVSDstoreidx8) 19251 v.AuxInt = off1 + off2 19252 v.Aux = mergeSym(sym1, sym2) 19253 v.AddArg(ptr) 19254 v.AddArg(idx) 19255 v.AddArg(val) 19256 v.AddArg(mem) 19257 return true 19258 } 19259 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 19260 // cond: ptr.Op != OpSB 19261 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 19262 for { 19263 off := v.AuxInt 19264 sym := v.Aux 19265 mem := v.Args[2] 19266 v_0 := v.Args[0] 19267 if v_0.Op != OpAMD64ADDQ { 19268 break 19269 } 19270 idx := v_0.Args[1] 19271 ptr := v_0.Args[0] 19272 val := v.Args[1] 19273 if !(ptr.Op != OpSB) { 19274 break 19275 } 19276 v.reset(OpAMD64MOVSDstoreidx1) 19277 v.AuxInt = off 19278 v.Aux = sym 19279 v.AddArg(ptr) 19280 
v.AddArg(idx) 19281 v.AddArg(val) 19282 v.AddArg(mem) 19283 return true 19284 } 19285 // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) 19286 // result: (MOVQstore [off] {sym} ptr val mem) 19287 for { 19288 off := v.AuxInt 19289 sym := v.Aux 19290 mem := v.Args[2] 19291 ptr := v.Args[0] 19292 v_1 := v.Args[1] 19293 if v_1.Op != OpAMD64MOVQi2f { 19294 break 19295 } 19296 val := v_1.Args[0] 19297 v.reset(OpAMD64MOVQstore) 19298 v.AuxInt = off 19299 v.Aux = sym 19300 v.AddArg(ptr) 19301 v.AddArg(val) 19302 v.AddArg(mem) 19303 return true 19304 } 19305 return false 19306 } 19307 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { 19308 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 19309 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 19310 for { 19311 c := v.AuxInt 19312 sym := v.Aux 19313 mem := v.Args[3] 19314 ptr := v.Args[0] 19315 v_1 := v.Args[1] 19316 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 { 19317 break 19318 } 19319 idx := v_1.Args[0] 19320 val := v.Args[2] 19321 v.reset(OpAMD64MOVSDstoreidx8) 19322 v.AuxInt = c 19323 v.Aux = sym 19324 v.AddArg(ptr) 19325 v.AddArg(idx) 19326 v.AddArg(val) 19327 v.AddArg(mem) 19328 return true 19329 } 19330 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 19331 // cond: is32Bit(c+d) 19332 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 19333 for { 19334 c := v.AuxInt 19335 sym := v.Aux 19336 mem := v.Args[3] 19337 v_0 := v.Args[0] 19338 if v_0.Op != OpAMD64ADDQconst { 19339 break 19340 } 19341 d := v_0.AuxInt 19342 ptr := v_0.Args[0] 19343 idx := v.Args[1] 19344 val := v.Args[2] 19345 if !(is32Bit(c + d)) { 19346 break 19347 } 19348 v.reset(OpAMD64MOVSDstoreidx1) 19349 v.AuxInt = c + d 19350 v.Aux = sym 19351 v.AddArg(ptr) 19352 v.AddArg(idx) 19353 v.AddArg(val) 19354 v.AddArg(mem) 19355 return true 19356 } 19357 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 19358 // cond: is32Bit(c+d) 19359 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 19360 for { 19361 c := v.AuxInt 19362 sym := v.Aux 19363 mem := v.Args[3] 19364 ptr := v.Args[0] 19365 v_1 := v.Args[1] 19366 if v_1.Op != OpAMD64ADDQconst { 19367 break 19368 } 19369 d := v_1.AuxInt 19370 idx := v_1.Args[0] 19371 val := v.Args[2] 19372 if !(is32Bit(c + d)) { 19373 break 19374 } 19375 v.reset(OpAMD64MOVSDstoreidx1) 19376 v.AuxInt = c + d 19377 v.Aux = sym 19378 v.AddArg(ptr) 19379 v.AddArg(idx) 19380 v.AddArg(val) 19381 v.AddArg(mem) 19382 return true 19383 } 19384 // match: (MOVSDstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 19385 // cond: is32Bit(i+c) 19386 // result: (MOVSDstore [i+c] {s} p w mem) 19387 for { 19388 i := v.AuxInt 19389 s := v.Aux 19390 mem := v.Args[3] 19391 p := v.Args[0] 19392 v_1 := v.Args[1] 19393 if v_1.Op != OpAMD64MOVQconst { 19394 break 19395 } 19396 c := v_1.AuxInt 19397 w := v.Args[2] 19398 if !(is32Bit(i + c)) { 19399 break 19400 } 19401 v.reset(OpAMD64MOVSDstore) 19402 v.AuxInt = i + c 19403 v.Aux = s 19404 v.AddArg(p) 19405 v.AddArg(w) 19406 v.AddArg(mem) 19407 return true 19408 } 19409 return false 19410 } 19411 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { 19412 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 19413 // cond: is32Bit(c+d) 19414 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 19415 for { 19416 c := v.AuxInt 19417 sym := v.Aux 19418 mem := v.Args[3] 19419 v_0 := v.Args[0] 19420 if v_0.Op != OpAMD64ADDQconst { 19421 break 19422 } 19423 d := v_0.AuxInt 19424 ptr := v_0.Args[0] 19425 idx := v.Args[1] 
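// As in the other scaled-index families: a constant added to the pointer
// folds into the displacement unscaled (c+d, checked just below), while the
// idx-side rule that follows scales it by the 8-byte element size (c+8*d).
// E.g. c = 16 with (ADDQconst [2] idx) gives displacement 16 + 8*2 = 32.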
19426 val := v.Args[2] 19427 if !(is32Bit(c + d)) { 19428 break 19429 } 19430 v.reset(OpAMD64MOVSDstoreidx8) 19431 v.AuxInt = c + d 19432 v.Aux = sym 19433 v.AddArg(ptr) 19434 v.AddArg(idx) 19435 v.AddArg(val) 19436 v.AddArg(mem) 19437 return true 19438 } 19439 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 19440 // cond: is32Bit(c+8*d) 19441 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 19442 for { 19443 c := v.AuxInt 19444 sym := v.Aux 19445 mem := v.Args[3] 19446 ptr := v.Args[0] 19447 v_1 := v.Args[1] 19448 if v_1.Op != OpAMD64ADDQconst { 19449 break 19450 } 19451 d := v_1.AuxInt 19452 idx := v_1.Args[0] 19453 val := v.Args[2] 19454 if !(is32Bit(c + 8*d)) { 19455 break 19456 } 19457 v.reset(OpAMD64MOVSDstoreidx8) 19458 v.AuxInt = c + 8*d 19459 v.Aux = sym 19460 v.AddArg(ptr) 19461 v.AddArg(idx) 19462 v.AddArg(val) 19463 v.AddArg(mem) 19464 return true 19465 } 19466 // match: (MOVSDstoreidx8 [i] {s} p (MOVQconst [c]) w mem) 19467 // cond: is32Bit(i+8*c) 19468 // result: (MOVSDstore [i+8*c] {s} p w mem) 19469 for { 19470 i := v.AuxInt 19471 s := v.Aux 19472 mem := v.Args[3] 19473 p := v.Args[0] 19474 v_1 := v.Args[1] 19475 if v_1.Op != OpAMD64MOVQconst { 19476 break 19477 } 19478 c := v_1.AuxInt 19479 w := v.Args[2] 19480 if !(is32Bit(i + 8*c)) { 19481 break 19482 } 19483 v.reset(OpAMD64MOVSDstore) 19484 v.AuxInt = i + 8*c 19485 v.Aux = s 19486 v.AddArg(p) 19487 v.AddArg(w) 19488 v.AddArg(mem) 19489 return true 19490 } 19491 return false 19492 } 19493 func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { 19494 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 19495 // cond: is32Bit(off1+off2) 19496 // result: (MOVSSload [off1+off2] {sym} ptr mem) 19497 for { 19498 off1 := v.AuxInt 19499 sym := v.Aux 19500 mem := v.Args[1] 19501 v_0 := v.Args[0] 19502 if v_0.Op != OpAMD64ADDQconst { 19503 break 19504 } 19505 off2 := v_0.AuxInt 19506 ptr := v_0.Args[0] 19507 if !(is32Bit(off1 + off2)) { 19508 break 19509 } 19510 v.reset(OpAMD64MOVSSload) 19511 v.AuxInt = off1 + off2 19512 v.Aux = sym 19513 v.AddArg(ptr) 19514 v.AddArg(mem) 19515 return true 19516 } 19517 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 19518 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19519 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 19520 for { 19521 off1 := v.AuxInt 19522 sym1 := v.Aux 19523 mem := v.Args[1] 19524 v_0 := v.Args[0] 19525 if v_0.Op != OpAMD64LEAQ { 19526 break 19527 } 19528 off2 := v_0.AuxInt 19529 sym2 := v_0.Aux 19530 base := v_0.Args[0] 19531 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19532 break 19533 } 19534 v.reset(OpAMD64MOVSSload) 19535 v.AuxInt = off1 + off2 19536 v.Aux = mergeSym(sym1, sym2) 19537 v.AddArg(base) 19538 v.AddArg(mem) 19539 return true 19540 } 19541 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 19542 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19543 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 19544 for { 19545 off1 := v.AuxInt 19546 sym1 := v.Aux 19547 mem := v.Args[1] 19548 v_0 := v.Args[0] 19549 if v_0.Op != OpAMD64LEAQ1 { 19550 break 19551 } 19552 off2 := v_0.AuxInt 19553 sym2 := v_0.Aux 19554 idx := v_0.Args[1] 19555 ptr := v_0.Args[0] 19556 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19557 break 19558 } 19559 v.reset(OpAMD64MOVSSloadidx1) 19560 v.AuxInt = off1 + off2 19561 v.Aux = mergeSym(sym1, sym2) 19562 v.AddArg(ptr) 19563 v.AddArg(idx) 19564 v.AddArg(mem) 19565 return true 19566 } 19567 // 
match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 19568 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19569 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 19570 for { 19571 off1 := v.AuxInt 19572 sym1 := v.Aux 19573 mem := v.Args[1] 19574 v_0 := v.Args[0] 19575 if v_0.Op != OpAMD64LEAQ4 { 19576 break 19577 } 19578 off2 := v_0.AuxInt 19579 sym2 := v_0.Aux 19580 idx := v_0.Args[1] 19581 ptr := v_0.Args[0] 19582 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19583 break 19584 } 19585 v.reset(OpAMD64MOVSSloadidx4) 19586 v.AuxInt = off1 + off2 19587 v.Aux = mergeSym(sym1, sym2) 19588 v.AddArg(ptr) 19589 v.AddArg(idx) 19590 v.AddArg(mem) 19591 return true 19592 } 19593 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) 19594 // cond: ptr.Op != OpSB 19595 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) 19596 for { 19597 off := v.AuxInt 19598 sym := v.Aux 19599 mem := v.Args[1] 19600 v_0 := v.Args[0] 19601 if v_0.Op != OpAMD64ADDQ { 19602 break 19603 } 19604 idx := v_0.Args[1] 19605 ptr := v_0.Args[0] 19606 if !(ptr.Op != OpSB) { 19607 break 19608 } 19609 v.reset(OpAMD64MOVSSloadidx1) 19610 v.AuxInt = off 19611 v.Aux = sym 19612 v.AddArg(ptr) 19613 v.AddArg(idx) 19614 v.AddArg(mem) 19615 return true 19616 } 19617 // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) 19618 // result: (MOVLi2f val) 19619 for { 19620 off := v.AuxInt 19621 sym := v.Aux 19622 _ = v.Args[1] 19623 ptr := v.Args[0] 19624 v_1 := v.Args[1] 19625 if v_1.Op != OpAMD64MOVLstore || v_1.AuxInt != off || v_1.Aux != sym { 19626 break 19627 } 19628 _ = v_1.Args[2] 19629 if ptr != v_1.Args[0] { 19630 break 19631 } 19632 val := v_1.Args[1] 19633 v.reset(OpAMD64MOVLi2f) 19634 v.AddArg(val) 19635 return true 19636 } 19637 return false 19638 } 19639 func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { 19640 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 19641 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) 19642 for { 19643 c := v.AuxInt 19644 sym := v.Aux 19645 mem := v.Args[2] 19646 ptr := v.Args[0] 19647 v_1 := v.Args[1] 19648 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { 19649 break 19650 } 19651 idx := v_1.Args[0] 19652 v.reset(OpAMD64MOVSSloadidx4) 19653 v.AuxInt = c 19654 v.Aux = sym 19655 v.AddArg(ptr) 19656 v.AddArg(idx) 19657 v.AddArg(mem) 19658 return true 19659 } 19660 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 19661 // cond: is32Bit(c+d) 19662 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 19663 for { 19664 c := v.AuxInt 19665 sym := v.Aux 19666 mem := v.Args[2] 19667 v_0 := v.Args[0] 19668 if v_0.Op != OpAMD64ADDQconst { 19669 break 19670 } 19671 d := v_0.AuxInt 19672 ptr := v_0.Args[0] 19673 idx := v.Args[1] 19674 if !(is32Bit(c + d)) { 19675 break 19676 } 19677 v.reset(OpAMD64MOVSSloadidx1) 19678 v.AuxInt = c + d 19679 v.Aux = sym 19680 v.AddArg(ptr) 19681 v.AddArg(idx) 19682 v.AddArg(mem) 19683 return true 19684 } 19685 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 19686 // cond: is32Bit(c+d) 19687 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) 19688 for { 19689 c := v.AuxInt 19690 sym := v.Aux 19691 mem := v.Args[2] 19692 ptr := v.Args[0] 19693 v_1 := v.Args[1] 19694 if v_1.Op != OpAMD64ADDQconst { 19695 break 19696 } 19697 d := v_1.AuxInt 19698 idx := v_1.Args[0] 19699 if !(is32Bit(c + d)) { 19700 break 19701 } 19702 v.reset(OpAMD64MOVSSloadidx1) 19703 v.AuxInt = c + d 19704 v.Aux = sym 19705 v.AddArg(ptr) 19706 v.AddArg(idx) 19707 v.AddArg(mem) 19708 return 
true 19709 } 19710 // match: (MOVSSloadidx1 [i] {s} p (MOVQconst [c]) mem) 19711 // cond: is32Bit(i+c) 19712 // result: (MOVSSload [i+c] {s} p mem) 19713 for { 19714 i := v.AuxInt 19715 s := v.Aux 19716 mem := v.Args[2] 19717 p := v.Args[0] 19718 v_1 := v.Args[1] 19719 if v_1.Op != OpAMD64MOVQconst { 19720 break 19721 } 19722 c := v_1.AuxInt 19723 if !(is32Bit(i + c)) { 19724 break 19725 } 19726 v.reset(OpAMD64MOVSSload) 19727 v.AuxInt = i + c 19728 v.Aux = s 19729 v.AddArg(p) 19730 v.AddArg(mem) 19731 return true 19732 } 19733 return false 19734 } 19735 func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { 19736 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 19737 // cond: is32Bit(c+d) 19738 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) 19739 for { 19740 c := v.AuxInt 19741 sym := v.Aux 19742 mem := v.Args[2] 19743 v_0 := v.Args[0] 19744 if v_0.Op != OpAMD64ADDQconst { 19745 break 19746 } 19747 d := v_0.AuxInt 19748 ptr := v_0.Args[0] 19749 idx := v.Args[1] 19750 if !(is32Bit(c + d)) { 19751 break 19752 } 19753 v.reset(OpAMD64MOVSSloadidx4) 19754 v.AuxInt = c + d 19755 v.Aux = sym 19756 v.AddArg(ptr) 19757 v.AddArg(idx) 19758 v.AddArg(mem) 19759 return true 19760 } 19761 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 19762 // cond: is32Bit(c+4*d) 19763 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) 19764 for { 19765 c := v.AuxInt 19766 sym := v.Aux 19767 mem := v.Args[2] 19768 ptr := v.Args[0] 19769 v_1 := v.Args[1] 19770 if v_1.Op != OpAMD64ADDQconst { 19771 break 19772 } 19773 d := v_1.AuxInt 19774 idx := v_1.Args[0] 19775 if !(is32Bit(c + 4*d)) { 19776 break 19777 } 19778 v.reset(OpAMD64MOVSSloadidx4) 19779 v.AuxInt = c + 4*d 19780 v.Aux = sym 19781 v.AddArg(ptr) 19782 v.AddArg(idx) 19783 v.AddArg(mem) 19784 return true 19785 } 19786 // match: (MOVSSloadidx4 [i] {s} p (MOVQconst [c]) mem) 19787 // cond: is32Bit(i+4*c) 19788 // result: (MOVSSload [i+4*c] {s} p mem) 19789 for { 19790 i := v.AuxInt 19791 s := v.Aux 19792 mem := v.Args[2] 19793 p := v.Args[0] 19794 v_1 := v.Args[1] 19795 if v_1.Op != OpAMD64MOVQconst { 19796 break 19797 } 19798 c := v_1.AuxInt 19799 if !(is32Bit(i + 4*c)) { 19800 break 19801 } 19802 v.reset(OpAMD64MOVSSload) 19803 v.AuxInt = i + 4*c 19804 v.Aux = s 19805 v.AddArg(p) 19806 v.AddArg(mem) 19807 return true 19808 } 19809 return false 19810 } 19811 func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { 19812 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 19813 // cond: is32Bit(off1+off2) 19814 // result: (MOVSSstore [off1+off2] {sym} ptr val mem) 19815 for { 19816 off1 := v.AuxInt 19817 sym := v.Aux 19818 mem := v.Args[2] 19819 v_0 := v.Args[0] 19820 if v_0.Op != OpAMD64ADDQconst { 19821 break 19822 } 19823 off2 := v_0.AuxInt 19824 ptr := v_0.Args[0] 19825 val := v.Args[1] 19826 if !(is32Bit(off1 + off2)) { 19827 break 19828 } 19829 v.reset(OpAMD64MOVSSstore) 19830 v.AuxInt = off1 + off2 19831 v.Aux = sym 19832 v.AddArg(ptr) 19833 v.AddArg(val) 19834 v.AddArg(mem) 19835 return true 19836 } 19837 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 19838 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19839 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 19840 for { 19841 off1 := v.AuxInt 19842 sym1 := v.Aux 19843 mem := v.Args[2] 19844 v_0 := v.Args[0] 19845 if v_0.Op != OpAMD64LEAQ { 19846 break 19847 } 19848 off2 := v_0.AuxInt 19849 sym2 := v_0.Aux 19850 base := v_0.Args[0] 19851 val := v.Args[1] 19852 if !(is32Bit(off1+off2) && 
canMergeSym(sym1, sym2)) { 19853 break 19854 } 19855 v.reset(OpAMD64MOVSSstore) 19856 v.AuxInt = off1 + off2 19857 v.Aux = mergeSym(sym1, sym2) 19858 v.AddArg(base) 19859 v.AddArg(val) 19860 v.AddArg(mem) 19861 return true 19862 } 19863 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 19864 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19865 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 19866 for { 19867 off1 := v.AuxInt 19868 sym1 := v.Aux 19869 mem := v.Args[2] 19870 v_0 := v.Args[0] 19871 if v_0.Op != OpAMD64LEAQ1 { 19872 break 19873 } 19874 off2 := v_0.AuxInt 19875 sym2 := v_0.Aux 19876 idx := v_0.Args[1] 19877 ptr := v_0.Args[0] 19878 val := v.Args[1] 19879 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19880 break 19881 } 19882 v.reset(OpAMD64MOVSSstoreidx1) 19883 v.AuxInt = off1 + off2 19884 v.Aux = mergeSym(sym1, sym2) 19885 v.AddArg(ptr) 19886 v.AddArg(idx) 19887 v.AddArg(val) 19888 v.AddArg(mem) 19889 return true 19890 } 19891 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 19892 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 19893 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 19894 for { 19895 off1 := v.AuxInt 19896 sym1 := v.Aux 19897 mem := v.Args[2] 19898 v_0 := v.Args[0] 19899 if v_0.Op != OpAMD64LEAQ4 { 19900 break 19901 } 19902 off2 := v_0.AuxInt 19903 sym2 := v_0.Aux 19904 idx := v_0.Args[1] 19905 ptr := v_0.Args[0] 19906 val := v.Args[1] 19907 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 19908 break 19909 } 19910 v.reset(OpAMD64MOVSSstoreidx4) 19911 v.AuxInt = off1 + off2 19912 v.Aux = mergeSym(sym1, sym2) 19913 v.AddArg(ptr) 19914 v.AddArg(idx) 19915 v.AddArg(val) 19916 v.AddArg(mem) 19917 return true 19918 } 19919 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) 19920 // cond: ptr.Op != OpSB 19921 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) 19922 for { 19923 off := v.AuxInt 19924 sym := v.Aux 19925 mem := v.Args[2] 19926 v_0 := v.Args[0] 19927 if v_0.Op != OpAMD64ADDQ { 19928 break 19929 } 19930 idx := v_0.Args[1] 19931 ptr := v_0.Args[0] 19932 val := v.Args[1] 19933 if !(ptr.Op != OpSB) { 19934 break 19935 } 19936 v.reset(OpAMD64MOVSSstoreidx1) 19937 v.AuxInt = off 19938 v.Aux = sym 19939 v.AddArg(ptr) 19940 v.AddArg(idx) 19941 v.AddArg(val) 19942 v.AddArg(mem) 19943 return true 19944 } 19945 // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) 19946 // result: (MOVLstore [off] {sym} ptr val mem) 19947 for { 19948 off := v.AuxInt 19949 sym := v.Aux 19950 mem := v.Args[2] 19951 ptr := v.Args[0] 19952 v_1 := v.Args[1] 19953 if v_1.Op != OpAMD64MOVLi2f { 19954 break 19955 } 19956 val := v_1.Args[0] 19957 v.reset(OpAMD64MOVLstore) 19958 v.AuxInt = off 19959 v.Aux = sym 19960 v.AddArg(ptr) 19961 v.AddArg(val) 19962 v.AddArg(mem) 19963 return true 19964 } 19965 return false 19966 } 19967 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { 19968 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 19969 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) 19970 for { 19971 c := v.AuxInt 19972 sym := v.Aux 19973 mem := v.Args[3] 19974 ptr := v.Args[0] 19975 v_1 := v.Args[1] 19976 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 { 19977 break 19978 } 19979 idx := v_1.Args[0] 19980 val := v.Args[2] 19981 v.reset(OpAMD64MOVSSstoreidx4) 19982 v.AuxInt = c 19983 v.Aux = sym 19984 v.AddArg(ptr) 19985 v.AddArg(idx) 19986 v.AddArg(val) 19987 v.AddArg(mem) 19988 return true 19989 } 
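// The rule above exploits that a left shift by 2 is a multiply by 4, so an
// index of the form (idx << 2) under the 1-byte-scaled op can use the
// 4-byte-scaled MOVSSstoreidx4 addressing mode directly. The rules below
// fold an ADDQconst on either operand into the displacement, mirroring the
// MOVSD (8-byte) forms earlier in the file.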
19990 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 19991 // cond: is32Bit(c+d) 19992 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 19993 for { 19994 c := v.AuxInt 19995 sym := v.Aux 19996 mem := v.Args[3] 19997 v_0 := v.Args[0] 19998 if v_0.Op != OpAMD64ADDQconst { 19999 break 20000 } 20001 d := v_0.AuxInt 20002 ptr := v_0.Args[0] 20003 idx := v.Args[1] 20004 val := v.Args[2] 20005 if !(is32Bit(c + d)) { 20006 break 20007 } 20008 v.reset(OpAMD64MOVSSstoreidx1) 20009 v.AuxInt = c + d 20010 v.Aux = sym 20011 v.AddArg(ptr) 20012 v.AddArg(idx) 20013 v.AddArg(val) 20014 v.AddArg(mem) 20015 return true 20016 } 20017 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 20018 // cond: is32Bit(c+d) 20019 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) 20020 for { 20021 c := v.AuxInt 20022 sym := v.Aux 20023 mem := v.Args[3] 20024 ptr := v.Args[0] 20025 v_1 := v.Args[1] 20026 if v_1.Op != OpAMD64ADDQconst { 20027 break 20028 } 20029 d := v_1.AuxInt 20030 idx := v_1.Args[0] 20031 val := v.Args[2] 20032 if !(is32Bit(c + d)) { 20033 break 20034 } 20035 v.reset(OpAMD64MOVSSstoreidx1) 20036 v.AuxInt = c + d 20037 v.Aux = sym 20038 v.AddArg(ptr) 20039 v.AddArg(idx) 20040 v.AddArg(val) 20041 v.AddArg(mem) 20042 return true 20043 } 20044 // match: (MOVSSstoreidx1 [i] {s} p (MOVQconst [c]) w mem) 20045 // cond: is32Bit(i+c) 20046 // result: (MOVSSstore [i+c] {s} p w mem) 20047 for { 20048 i := v.AuxInt 20049 s := v.Aux 20050 mem := v.Args[3] 20051 p := v.Args[0] 20052 v_1 := v.Args[1] 20053 if v_1.Op != OpAMD64MOVQconst { 20054 break 20055 } 20056 c := v_1.AuxInt 20057 w := v.Args[2] 20058 if !(is32Bit(i + c)) { 20059 break 20060 } 20061 v.reset(OpAMD64MOVSSstore) 20062 v.AuxInt = i + c 20063 v.Aux = s 20064 v.AddArg(p) 20065 v.AddArg(w) 20066 v.AddArg(mem) 20067 return true 20068 } 20069 return false 20070 } 20071 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { 20072 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 20073 // cond: is32Bit(c+d) 20074 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) 20075 for { 20076 c := v.AuxInt 20077 sym := v.Aux 20078 mem := v.Args[3] 20079 v_0 := v.Args[0] 20080 if v_0.Op != OpAMD64ADDQconst { 20081 break 20082 } 20083 d := v_0.AuxInt 20084 ptr := v_0.Args[0] 20085 idx := v.Args[1] 20086 val := v.Args[2] 20087 if !(is32Bit(c + d)) { 20088 break 20089 } 20090 v.reset(OpAMD64MOVSSstoreidx4) 20091 v.AuxInt = c + d 20092 v.Aux = sym 20093 v.AddArg(ptr) 20094 v.AddArg(idx) 20095 v.AddArg(val) 20096 v.AddArg(mem) 20097 return true 20098 } 20099 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 20100 // cond: is32Bit(c+4*d) 20101 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) 20102 for { 20103 c := v.AuxInt 20104 sym := v.Aux 20105 mem := v.Args[3] 20106 ptr := v.Args[0] 20107 v_1 := v.Args[1] 20108 if v_1.Op != OpAMD64ADDQconst { 20109 break 20110 } 20111 d := v_1.AuxInt 20112 idx := v_1.Args[0] 20113 val := v.Args[2] 20114 if !(is32Bit(c + 4*d)) { 20115 break 20116 } 20117 v.reset(OpAMD64MOVSSstoreidx4) 20118 v.AuxInt = c + 4*d 20119 v.Aux = sym 20120 v.AddArg(ptr) 20121 v.AddArg(idx) 20122 v.AddArg(val) 20123 v.AddArg(mem) 20124 return true 20125 } 20126 // match: (MOVSSstoreidx4 [i] {s} p (MOVQconst [c]) w mem) 20127 // cond: is32Bit(i+4*c) 20128 // result: (MOVSSstore [i+4*c] {s} p w mem) 20129 for { 20130 i := v.AuxInt 20131 s := v.Aux 20132 mem := v.Args[3] 20133 p := v.Args[0] 20134 v_1 := v.Args[1] 20135 if v_1.Op != OpAMD64MOVQconst { 20136 
break 20137 } 20138 c := v_1.AuxInt 20139 w := v.Args[2] 20140 if !(is32Bit(i + 4*c)) { 20141 break 20142 } 20143 v.reset(OpAMD64MOVSSstore) 20144 v.AuxInt = i + 4*c 20145 v.Aux = s 20146 v.AddArg(p) 20147 v.AddArg(w) 20148 v.AddArg(mem) 20149 return true 20150 } 20151 return false 20152 } 20153 func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { 20154 b := v.Block 20155 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) 20156 // cond: x.Uses == 1 && clobber(x) 20157 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 20158 for { 20159 x := v.Args[0] 20160 if x.Op != OpAMD64MOVWload { 20161 break 20162 } 20163 off := x.AuxInt 20164 sym := x.Aux 20165 mem := x.Args[1] 20166 ptr := x.Args[0] 20167 if !(x.Uses == 1 && clobber(x)) { 20168 break 20169 } 20170 b = x.Block 20171 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 20172 v.reset(OpCopy) 20173 v.AddArg(v0) 20174 v0.AuxInt = off 20175 v0.Aux = sym 20176 v0.AddArg(ptr) 20177 v0.AddArg(mem) 20178 return true 20179 } 20180 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) 20181 // cond: x.Uses == 1 && clobber(x) 20182 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 20183 for { 20184 x := v.Args[0] 20185 if x.Op != OpAMD64MOVLload { 20186 break 20187 } 20188 off := x.AuxInt 20189 sym := x.Aux 20190 mem := x.Args[1] 20191 ptr := x.Args[0] 20192 if !(x.Uses == 1 && clobber(x)) { 20193 break 20194 } 20195 b = x.Block 20196 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 20197 v.reset(OpCopy) 20198 v.AddArg(v0) 20199 v0.AuxInt = off 20200 v0.Aux = sym 20201 v0.AddArg(ptr) 20202 v0.AddArg(mem) 20203 return true 20204 } 20205 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) 20206 // cond: x.Uses == 1 && clobber(x) 20207 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) 20208 for { 20209 x := v.Args[0] 20210 if x.Op != OpAMD64MOVQload { 20211 break 20212 } 20213 off := x.AuxInt 20214 sym := x.Aux 20215 mem := x.Args[1] 20216 ptr := x.Args[0] 20217 if !(x.Uses == 1 && clobber(x)) { 20218 break 20219 } 20220 b = x.Block 20221 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) 20222 v.reset(OpCopy) 20223 v.AddArg(v0) 20224 v0.AuxInt = off 20225 v0.Aux = sym 20226 v0.AddArg(ptr) 20227 v0.AddArg(mem) 20228 return true 20229 } 20230 // match: (MOVWQSX (ANDLconst [c] x)) 20231 // cond: c & 0x8000 == 0 20232 // result: (ANDLconst [c & 0x7fff] x) 20233 for { 20234 v_0 := v.Args[0] 20235 if v_0.Op != OpAMD64ANDLconst { 20236 break 20237 } 20238 c := v_0.AuxInt 20239 x := v_0.Args[0] 20240 if !(c&0x8000 == 0) { 20241 break 20242 } 20243 v.reset(OpAMD64ANDLconst) 20244 v.AuxInt = c & 0x7fff 20245 v.AddArg(x) 20246 return true 20247 } 20248 // match: (MOVWQSX (MOVWQSX x)) 20249 // result: (MOVWQSX x) 20250 for { 20251 v_0 := v.Args[0] 20252 if v_0.Op != OpAMD64MOVWQSX { 20253 break 20254 } 20255 x := v_0.Args[0] 20256 v.reset(OpAMD64MOVWQSX) 20257 v.AddArg(x) 20258 return true 20259 } 20260 // match: (MOVWQSX (MOVBQSX x)) 20261 // result: (MOVBQSX x) 20262 for { 20263 v_0 := v.Args[0] 20264 if v_0.Op != OpAMD64MOVBQSX { 20265 break 20266 } 20267 x := v_0.Args[0] 20268 v.reset(OpAMD64MOVBQSX) 20269 v.AddArg(x) 20270 return true 20271 } 20272 return false 20273 } 20274 func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool { 20275 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 20276 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 20277 // result: (MOVWQSX x) 20278 for { 20279 off := v.AuxInt 20280 sym := v.Aux 20281 _ = v.Args[1] 20282 ptr := v.Args[0] 
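// Store-to-load forwarding: when the checks below show that the load's
// memory state comes directly from a same-width store to the same address
// (isSamePtr), the loaded bits must equal the low 16 bits of x, so the
// memory round-trip is replaced by sign-extending x in a register.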
20283 v_1 := v.Args[1] 20284 if v_1.Op != OpAMD64MOVWstore { 20285 break 20286 } 20287 off2 := v_1.AuxInt 20288 sym2 := v_1.Aux 20289 _ = v_1.Args[2] 20290 ptr2 := v_1.Args[0] 20291 x := v_1.Args[1] 20292 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 20293 break 20294 } 20295 v.reset(OpAMD64MOVWQSX) 20296 v.AddArg(x) 20297 return true 20298 } 20299 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 20300 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 20301 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 20302 for { 20303 off1 := v.AuxInt 20304 sym1 := v.Aux 20305 mem := v.Args[1] 20306 v_0 := v.Args[0] 20307 if v_0.Op != OpAMD64LEAQ { 20308 break 20309 } 20310 off2 := v_0.AuxInt 20311 sym2 := v_0.Aux 20312 base := v_0.Args[0] 20313 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 20314 break 20315 } 20316 v.reset(OpAMD64MOVWQSXload) 20317 v.AuxInt = off1 + off2 20318 v.Aux = mergeSym(sym1, sym2) 20319 v.AddArg(base) 20320 v.AddArg(mem) 20321 return true 20322 } 20323 return false 20324 } 20325 func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { 20326 b := v.Block 20327 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) 20328 // cond: x.Uses == 1 && clobber(x) 20329 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 20330 for { 20331 x := v.Args[0] 20332 if x.Op != OpAMD64MOVWload { 20333 break 20334 } 20335 off := x.AuxInt 20336 sym := x.Aux 20337 mem := x.Args[1] 20338 ptr := x.Args[0] 20339 if !(x.Uses == 1 && clobber(x)) { 20340 break 20341 } 20342 b = x.Block 20343 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 20344 v.reset(OpCopy) 20345 v.AddArg(v0) 20346 v0.AuxInt = off 20347 v0.Aux = sym 20348 v0.AddArg(ptr) 20349 v0.AddArg(mem) 20350 return true 20351 } 20352 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) 20353 // cond: x.Uses == 1 && clobber(x) 20354 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 20355 for { 20356 x := v.Args[0] 20357 if x.Op != OpAMD64MOVLload { 20358 break 20359 } 20360 off := x.AuxInt 20361 sym := x.Aux 20362 mem := x.Args[1] 20363 ptr := x.Args[0] 20364 if !(x.Uses == 1 && clobber(x)) { 20365 break 20366 } 20367 b = x.Block 20368 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 20369 v.reset(OpCopy) 20370 v.AddArg(v0) 20371 v0.AuxInt = off 20372 v0.Aux = sym 20373 v0.AddArg(ptr) 20374 v0.AddArg(mem) 20375 return true 20376 } 20377 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) 20378 // cond: x.Uses == 1 && clobber(x) 20379 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) 20380 for { 20381 x := v.Args[0] 20382 if x.Op != OpAMD64MOVQload { 20383 break 20384 } 20385 off := x.AuxInt 20386 sym := x.Aux 20387 mem := x.Args[1] 20388 ptr := x.Args[0] 20389 if !(x.Uses == 1 && clobber(x)) { 20390 break 20391 } 20392 b = x.Block 20393 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) 20394 v.reset(OpCopy) 20395 v.AddArg(v0) 20396 v0.AuxInt = off 20397 v0.Aux = sym 20398 v0.AddArg(ptr) 20399 v0.AddArg(mem) 20400 return true 20401 } 20402 // match: (MOVWQZX x) 20403 // cond: zeroUpper48Bits(x,3) 20404 // result: x 20405 for { 20406 x := v.Args[0] 20407 if !(zeroUpper48Bits(x, 3)) { 20408 break 20409 } 20410 v.reset(OpCopy) 20411 v.Type = x.Type 20412 v.AddArg(x) 20413 return true 20414 } 20415 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) 20416 // cond: x.Uses == 1 && clobber(x) 20417 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 20418 for { 20419 x := v.Args[0] 20420 if x.Op != OpAMD64MOVWloadidx1 { 20421 break 20422 
} 20423 off := x.AuxInt 20424 sym := x.Aux 20425 mem := x.Args[2] 20426 ptr := x.Args[0] 20427 idx := x.Args[1] 20428 if !(x.Uses == 1 && clobber(x)) { 20429 break 20430 } 20431 b = x.Block 20432 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 20433 v.reset(OpCopy) 20434 v.AddArg(v0) 20435 v0.AuxInt = off 20436 v0.Aux = sym 20437 v0.AddArg(ptr) 20438 v0.AddArg(idx) 20439 v0.AddArg(mem) 20440 return true 20441 } 20442 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 20443 // cond: x.Uses == 1 && clobber(x) 20444 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 20445 for { 20446 x := v.Args[0] 20447 if x.Op != OpAMD64MOVWloadidx2 { 20448 break 20449 } 20450 off := x.AuxInt 20451 sym := x.Aux 20452 mem := x.Args[2] 20453 ptr := x.Args[0] 20454 idx := x.Args[1] 20455 if !(x.Uses == 1 && clobber(x)) { 20456 break 20457 } 20458 b = x.Block 20459 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 20460 v.reset(OpCopy) 20461 v.AddArg(v0) 20462 v0.AuxInt = off 20463 v0.Aux = sym 20464 v0.AddArg(ptr) 20465 v0.AddArg(idx) 20466 v0.AddArg(mem) 20467 return true 20468 } 20469 // match: (MOVWQZX (ANDLconst [c] x)) 20470 // result: (ANDLconst [c & 0xffff] x) 20471 for { 20472 v_0 := v.Args[0] 20473 if v_0.Op != OpAMD64ANDLconst { 20474 break 20475 } 20476 c := v_0.AuxInt 20477 x := v_0.Args[0] 20478 v.reset(OpAMD64ANDLconst) 20479 v.AuxInt = c & 0xffff 20480 v.AddArg(x) 20481 return true 20482 } 20483 // match: (MOVWQZX (MOVWQZX x)) 20484 // result: (MOVWQZX x) 20485 for { 20486 v_0 := v.Args[0] 20487 if v_0.Op != OpAMD64MOVWQZX { 20488 break 20489 } 20490 x := v_0.Args[0] 20491 v.reset(OpAMD64MOVWQZX) 20492 v.AddArg(x) 20493 return true 20494 } 20495 // match: (MOVWQZX (MOVBQZX x)) 20496 // result: (MOVBQZX x) 20497 for { 20498 v_0 := v.Args[0] 20499 if v_0.Op != OpAMD64MOVBQZX { 20500 break 20501 } 20502 x := v_0.Args[0] 20503 v.reset(OpAMD64MOVBQZX) 20504 v.AddArg(x) 20505 return true 20506 } 20507 return false 20508 } 20509 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 20510 b := v.Block 20511 config := b.Func.Config 20512 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 20513 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 20514 // result: (MOVWQZX x) 20515 for { 20516 off := v.AuxInt 20517 sym := v.Aux 20518 _ = v.Args[1] 20519 ptr := v.Args[0] 20520 v_1 := v.Args[1] 20521 if v_1.Op != OpAMD64MOVWstore { 20522 break 20523 } 20524 off2 := v_1.AuxInt 20525 sym2 := v_1.Aux 20526 _ = v_1.Args[2] 20527 ptr2 := v_1.Args[0] 20528 x := v_1.Args[1] 20529 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 20530 break 20531 } 20532 v.reset(OpAMD64MOVWQZX) 20533 v.AddArg(x) 20534 return true 20535 } 20536 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 20537 // cond: is32Bit(off1+off2) 20538 // result: (MOVWload [off1+off2] {sym} ptr mem) 20539 for { 20540 off1 := v.AuxInt 20541 sym := v.Aux 20542 mem := v.Args[1] 20543 v_0 := v.Args[0] 20544 if v_0.Op != OpAMD64ADDQconst { 20545 break 20546 } 20547 off2 := v_0.AuxInt 20548 ptr := v_0.Args[0] 20549 if !(is32Bit(off1 + off2)) { 20550 break 20551 } 20552 v.reset(OpAMD64MOVWload) 20553 v.AuxInt = off1 + off2 20554 v.Aux = sym 20555 v.AddArg(ptr) 20556 v.AddArg(mem) 20557 return true 20558 } 20559 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 20560 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 20561 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 20562 for { 20563 off1 := v.AuxInt 20564 sym1 := v.Aux 20565 mem := 
v.Args[1] 20566 v_0 := v.Args[0] 20567 if v_0.Op != OpAMD64LEAQ { 20568 break 20569 } 20570 off2 := v_0.AuxInt 20571 sym2 := v_0.Aux 20572 base := v_0.Args[0] 20573 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 20574 break 20575 } 20576 v.reset(OpAMD64MOVWload) 20577 v.AuxInt = off1 + off2 20578 v.Aux = mergeSym(sym1, sym2) 20579 v.AddArg(base) 20580 v.AddArg(mem) 20581 return true 20582 } 20583 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 20584 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 20585 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 20586 for { 20587 off1 := v.AuxInt 20588 sym1 := v.Aux 20589 mem := v.Args[1] 20590 v_0 := v.Args[0] 20591 if v_0.Op != OpAMD64LEAQ1 { 20592 break 20593 } 20594 off2 := v_0.AuxInt 20595 sym2 := v_0.Aux 20596 idx := v_0.Args[1] 20597 ptr := v_0.Args[0] 20598 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 20599 break 20600 } 20601 v.reset(OpAMD64MOVWloadidx1) 20602 v.AuxInt = off1 + off2 20603 v.Aux = mergeSym(sym1, sym2) 20604 v.AddArg(ptr) 20605 v.AddArg(idx) 20606 v.AddArg(mem) 20607 return true 20608 } 20609 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 20610 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 20611 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 20612 for { 20613 off1 := v.AuxInt 20614 sym1 := v.Aux 20615 mem := v.Args[1] 20616 v_0 := v.Args[0] 20617 if v_0.Op != OpAMD64LEAQ2 { 20618 break 20619 } 20620 off2 := v_0.AuxInt 20621 sym2 := v_0.Aux 20622 idx := v_0.Args[1] 20623 ptr := v_0.Args[0] 20624 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 20625 break 20626 } 20627 v.reset(OpAMD64MOVWloadidx2) 20628 v.AuxInt = off1 + off2 20629 v.Aux = mergeSym(sym1, sym2) 20630 v.AddArg(ptr) 20631 v.AddArg(idx) 20632 v.AddArg(mem) 20633 return true 20634 } 20635 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 20636 // cond: ptr.Op != OpSB 20637 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 20638 for { 20639 off := v.AuxInt 20640 sym := v.Aux 20641 mem := v.Args[1] 20642 v_0 := v.Args[0] 20643 if v_0.Op != OpAMD64ADDQ { 20644 break 20645 } 20646 idx := v_0.Args[1] 20647 ptr := v_0.Args[0] 20648 if !(ptr.Op != OpSB) { 20649 break 20650 } 20651 v.reset(OpAMD64MOVWloadidx1) 20652 v.AuxInt = off 20653 v.Aux = sym 20654 v.AddArg(ptr) 20655 v.AddArg(idx) 20656 v.AddArg(mem) 20657 return true 20658 } 20659 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 20660 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 20661 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 20662 for { 20663 off1 := v.AuxInt 20664 sym1 := v.Aux 20665 mem := v.Args[1] 20666 v_0 := v.Args[0] 20667 if v_0.Op != OpAMD64LEAL { 20668 break 20669 } 20670 off2 := v_0.AuxInt 20671 sym2 := v_0.Aux 20672 base := v_0.Args[0] 20673 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 20674 break 20675 } 20676 v.reset(OpAMD64MOVWload) 20677 v.AuxInt = off1 + off2 20678 v.Aux = mergeSym(sym1, sym2) 20679 v.AddArg(base) 20680 v.AddArg(mem) 20681 return true 20682 } 20683 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 20684 // cond: is32Bit(off1+off2) 20685 // result: (MOVWload [off1+off2] {sym} ptr mem) 20686 for { 20687 off1 := v.AuxInt 20688 sym := v.Aux 20689 mem := v.Args[1] 20690 v_0 := v.Args[0] 20691 if v_0.Op != OpAMD64ADDLconst { 20692 break 20693 } 20694 off2 := v_0.AuxInt 20695 ptr := v_0.Args[0] 20696 if !(is32Bit(off1 + off2)) { 20697 break 20698 } 20699 v.reset(OpAMD64MOVWload) 20700 v.AuxInt 
= off1 + off2 20701 v.Aux = sym 20702 v.AddArg(ptr) 20703 v.AddArg(mem) 20704 return true 20705 } 20706 // match: (MOVWload [off] {sym} (SB) _) 20707 // cond: symIsRO(sym) 20708 // result: (MOVLconst [int64(read16(sym, off, config.BigEndian))]) 20709 for { 20710 off := v.AuxInt 20711 sym := v.Aux 20712 _ = v.Args[1] 20713 v_0 := v.Args[0] 20714 if v_0.Op != OpSB || !(symIsRO(sym)) { 20715 break 20716 } 20717 v.reset(OpAMD64MOVLconst) 20718 v.AuxInt = int64(read16(sym, off, config.BigEndian)) 20719 return true 20720 } 20721 return false 20722 } 20723 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 20724 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 20725 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 20726 for { 20727 c := v.AuxInt 20728 sym := v.Aux 20729 mem := v.Args[2] 20730 ptr := v.Args[0] 20731 v_1 := v.Args[1] 20732 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 { 20733 break 20734 } 20735 idx := v_1.Args[0] 20736 v.reset(OpAMD64MOVWloadidx2) 20737 v.AuxInt = c 20738 v.Aux = sym 20739 v.AddArg(ptr) 20740 v.AddArg(idx) 20741 v.AddArg(mem) 20742 return true 20743 } 20744 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) 20745 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 20746 for { 20747 c := v.AuxInt 20748 sym := v.Aux 20749 mem := v.Args[2] 20750 v_0 := v.Args[0] 20751 if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { 20752 break 20753 } 20754 idx := v_0.Args[0] 20755 ptr := v.Args[1] 20756 v.reset(OpAMD64MOVWloadidx2) 20757 v.AuxInt = c 20758 v.Aux = sym 20759 v.AddArg(ptr) 20760 v.AddArg(idx) 20761 v.AddArg(mem) 20762 return true 20763 } 20764 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 20765 // cond: is32Bit(c+d) 20766 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 20767 for { 20768 c := v.AuxInt 20769 sym := v.Aux 20770 mem := v.Args[2] 20771 v_0 := v.Args[0] 20772 if v_0.Op != OpAMD64ADDQconst { 20773 break 20774 } 20775 d := v_0.AuxInt 20776 ptr := v_0.Args[0] 20777 idx := v.Args[1] 20778 if !(is32Bit(c + d)) { 20779 break 20780 } 20781 v.reset(OpAMD64MOVWloadidx1) 20782 v.AuxInt = c + d 20783 v.Aux = sym 20784 v.AddArg(ptr) 20785 v.AddArg(idx) 20786 v.AddArg(mem) 20787 return true 20788 } 20789 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 20790 // cond: is32Bit(c+d) 20791 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 20792 for { 20793 c := v.AuxInt 20794 sym := v.Aux 20795 mem := v.Args[2] 20796 idx := v.Args[0] 20797 v_1 := v.Args[1] 20798 if v_1.Op != OpAMD64ADDQconst { 20799 break 20800 } 20801 d := v_1.AuxInt 20802 ptr := v_1.Args[0] 20803 if !(is32Bit(c + d)) { 20804 break 20805 } 20806 v.reset(OpAMD64MOVWloadidx1) 20807 v.AuxInt = c + d 20808 v.Aux = sym 20809 v.AddArg(ptr) 20810 v.AddArg(idx) 20811 v.AddArg(mem) 20812 return true 20813 } 20814 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 20815 // cond: is32Bit(c+d) 20816 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 20817 for { 20818 c := v.AuxInt 20819 sym := v.Aux 20820 mem := v.Args[2] 20821 ptr := v.Args[0] 20822 v_1 := v.Args[1] 20823 if v_1.Op != OpAMD64ADDQconst { 20824 break 20825 } 20826 d := v_1.AuxInt 20827 idx := v_1.Args[0] 20828 if !(is32Bit(c + d)) { 20829 break 20830 } 20831 v.reset(OpAMD64MOVWloadidx1) 20832 v.AuxInt = c + d 20833 v.Aux = sym 20834 v.AddArg(ptr) 20835 v.AddArg(idx) 20836 v.AddArg(mem) 20837 return true 20838 } 20839 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 20840 // cond: is32Bit(c+d) 20841 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 20842 for 
{ 20843 c := v.AuxInt 20844 sym := v.Aux 20845 mem := v.Args[2] 20846 v_0 := v.Args[0] 20847 if v_0.Op != OpAMD64ADDQconst { 20848 break 20849 } 20850 d := v_0.AuxInt 20851 idx := v_0.Args[0] 20852 ptr := v.Args[1] 20853 if !(is32Bit(c + d)) { 20854 break 20855 } 20856 v.reset(OpAMD64MOVWloadidx1) 20857 v.AuxInt = c + d 20858 v.Aux = sym 20859 v.AddArg(ptr) 20860 v.AddArg(idx) 20861 v.AddArg(mem) 20862 return true 20863 } 20864 // match: (MOVWloadidx1 [i] {s} p (MOVQconst [c]) mem) 20865 // cond: is32Bit(i+c) 20866 // result: (MOVWload [i+c] {s} p mem) 20867 for { 20868 i := v.AuxInt 20869 s := v.Aux 20870 mem := v.Args[2] 20871 p := v.Args[0] 20872 v_1 := v.Args[1] 20873 if v_1.Op != OpAMD64MOVQconst { 20874 break 20875 } 20876 c := v_1.AuxInt 20877 if !(is32Bit(i + c)) { 20878 break 20879 } 20880 v.reset(OpAMD64MOVWload) 20881 v.AuxInt = i + c 20882 v.Aux = s 20883 v.AddArg(p) 20884 v.AddArg(mem) 20885 return true 20886 } 20887 // match: (MOVWloadidx1 [i] {s} (MOVQconst [c]) p mem) 20888 // cond: is32Bit(i+c) 20889 // result: (MOVWload [i+c] {s} p mem) 20890 for { 20891 i := v.AuxInt 20892 s := v.Aux 20893 mem := v.Args[2] 20894 v_0 := v.Args[0] 20895 if v_0.Op != OpAMD64MOVQconst { 20896 break 20897 } 20898 c := v_0.AuxInt 20899 p := v.Args[1] 20900 if !(is32Bit(i + c)) { 20901 break 20902 } 20903 v.reset(OpAMD64MOVWload) 20904 v.AuxInt = i + c 20905 v.Aux = s 20906 v.AddArg(p) 20907 v.AddArg(mem) 20908 return true 20909 } 20910 return false 20911 } 20912 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 20913 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 20914 // cond: is32Bit(c+d) 20915 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 20916 for { 20917 c := v.AuxInt 20918 sym := v.Aux 20919 mem := v.Args[2] 20920 v_0 := v.Args[0] 20921 if v_0.Op != OpAMD64ADDQconst { 20922 break 20923 } 20924 d := v_0.AuxInt 20925 ptr := v_0.Args[0] 20926 idx := v.Args[1] 20927 if !(is32Bit(c + d)) { 20928 break 20929 } 20930 v.reset(OpAMD64MOVWloadidx2) 20931 v.AuxInt = c + d 20932 v.Aux = sym 20933 v.AddArg(ptr) 20934 v.AddArg(idx) 20935 v.AddArg(mem) 20936 return true 20937 } 20938 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 20939 // cond: is32Bit(c+2*d) 20940 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 20941 for { 20942 c := v.AuxInt 20943 sym := v.Aux 20944 mem := v.Args[2] 20945 ptr := v.Args[0] 20946 v_1 := v.Args[1] 20947 if v_1.Op != OpAMD64ADDQconst { 20948 break 20949 } 20950 d := v_1.AuxInt 20951 idx := v_1.Args[0] 20952 if !(is32Bit(c + 2*d)) { 20953 break 20954 } 20955 v.reset(OpAMD64MOVWloadidx2) 20956 v.AuxInt = c + 2*d 20957 v.Aux = sym 20958 v.AddArg(ptr) 20959 v.AddArg(idx) 20960 v.AddArg(mem) 20961 return true 20962 } 20963 // match: (MOVWloadidx2 [i] {s} p (MOVQconst [c]) mem) 20964 // cond: is32Bit(i+2*c) 20965 // result: (MOVWload [i+2*c] {s} p mem) 20966 for { 20967 i := v.AuxInt 20968 s := v.Aux 20969 mem := v.Args[2] 20970 p := v.Args[0] 20971 v_1 := v.Args[1] 20972 if v_1.Op != OpAMD64MOVQconst { 20973 break 20974 } 20975 c := v_1.AuxInt 20976 if !(is32Bit(i + 2*c)) { 20977 break 20978 } 20979 v.reset(OpAMD64MOVWload) 20980 v.AuxInt = i + 2*c 20981 v.Aux = s 20982 v.AddArg(p) 20983 v.AddArg(mem) 20984 return true 20985 } 20986 return false 20987 } 20988 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 20989 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 20990 // result: (MOVWstore [off] {sym} ptr x mem) 20991 for { 20992 off := v.AuxInt 20993 sym := v.Aux 20994 mem := v.Args[2] 20995 ptr := v.Args[0] 
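// A 16-bit store writes only the low 16 bits of its source, so the
// MOVWQSX/MOVWQZX extensions matched here and in the next rule are
// redundant and the unextended x is stored directly.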
20996 v_1 := v.Args[1] 20997 if v_1.Op != OpAMD64MOVWQSX { 20998 break 20999 } 21000 x := v_1.Args[0] 21001 v.reset(OpAMD64MOVWstore) 21002 v.AuxInt = off 21003 v.Aux = sym 21004 v.AddArg(ptr) 21005 v.AddArg(x) 21006 v.AddArg(mem) 21007 return true 21008 } 21009 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 21010 // result: (MOVWstore [off] {sym} ptr x mem) 21011 for { 21012 off := v.AuxInt 21013 sym := v.Aux 21014 mem := v.Args[2] 21015 ptr := v.Args[0] 21016 v_1 := v.Args[1] 21017 if v_1.Op != OpAMD64MOVWQZX { 21018 break 21019 } 21020 x := v_1.Args[0] 21021 v.reset(OpAMD64MOVWstore) 21022 v.AuxInt = off 21023 v.Aux = sym 21024 v.AddArg(ptr) 21025 v.AddArg(x) 21026 v.AddArg(mem) 21027 return true 21028 } 21029 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 21030 // cond: is32Bit(off1+off2) 21031 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 21032 for { 21033 off1 := v.AuxInt 21034 sym := v.Aux 21035 mem := v.Args[2] 21036 v_0 := v.Args[0] 21037 if v_0.Op != OpAMD64ADDQconst { 21038 break 21039 } 21040 off2 := v_0.AuxInt 21041 ptr := v_0.Args[0] 21042 val := v.Args[1] 21043 if !(is32Bit(off1 + off2)) { 21044 break 21045 } 21046 v.reset(OpAMD64MOVWstore) 21047 v.AuxInt = off1 + off2 21048 v.Aux = sym 21049 v.AddArg(ptr) 21050 v.AddArg(val) 21051 v.AddArg(mem) 21052 return true 21053 } 21054 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 21055 // cond: validOff(off) 21056 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 21057 for { 21058 off := v.AuxInt 21059 sym := v.Aux 21060 mem := v.Args[2] 21061 ptr := v.Args[0] 21062 v_1 := v.Args[1] 21063 if v_1.Op != OpAMD64MOVLconst { 21064 break 21065 } 21066 c := v_1.AuxInt 21067 if !(validOff(off)) { 21068 break 21069 } 21070 v.reset(OpAMD64MOVWstoreconst) 21071 v.AuxInt = makeValAndOff(int64(int16(c)), off) 21072 v.Aux = sym 21073 v.AddArg(ptr) 21074 v.AddArg(mem) 21075 return true 21076 } 21077 // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) 21078 // cond: validOff(off) 21079 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 21080 for { 21081 off := v.AuxInt 21082 sym := v.Aux 21083 mem := v.Args[2] 21084 ptr := v.Args[0] 21085 v_1 := v.Args[1] 21086 if v_1.Op != OpAMD64MOVQconst { 21087 break 21088 } 21089 c := v_1.AuxInt 21090 if !(validOff(off)) { 21091 break 21092 } 21093 v.reset(OpAMD64MOVWstoreconst) 21094 v.AuxInt = makeValAndOff(int64(int16(c)), off) 21095 v.Aux = sym 21096 v.AddArg(ptr) 21097 v.AddArg(mem) 21098 return true 21099 } 21100 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 21101 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21102 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 21103 for { 21104 off1 := v.AuxInt 21105 sym1 := v.Aux 21106 mem := v.Args[2] 21107 v_0 := v.Args[0] 21108 if v_0.Op != OpAMD64LEAQ { 21109 break 21110 } 21111 off2 := v_0.AuxInt 21112 sym2 := v_0.Aux 21113 base := v_0.Args[0] 21114 val := v.Args[1] 21115 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21116 break 21117 } 21118 v.reset(OpAMD64MOVWstore) 21119 v.AuxInt = off1 + off2 21120 v.Aux = mergeSym(sym1, sym2) 21121 v.AddArg(base) 21122 v.AddArg(val) 21123 v.AddArg(mem) 21124 return true 21125 } 21126 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 21127 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21128 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 21129 for { 21130 off1 := v.AuxInt 21131 sym1 := v.Aux 21132 
mem := v.Args[2] 21133 v_0 := v.Args[0] 21134 if v_0.Op != OpAMD64LEAQ1 { 21135 break 21136 } 21137 off2 := v_0.AuxInt 21138 sym2 := v_0.Aux 21139 idx := v_0.Args[1] 21140 ptr := v_0.Args[0] 21141 val := v.Args[1] 21142 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21143 break 21144 } 21145 v.reset(OpAMD64MOVWstoreidx1) 21146 v.AuxInt = off1 + off2 21147 v.Aux = mergeSym(sym1, sym2) 21148 v.AddArg(ptr) 21149 v.AddArg(idx) 21150 v.AddArg(val) 21151 v.AddArg(mem) 21152 return true 21153 } 21154 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 21155 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 21156 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 21157 for { 21158 off1 := v.AuxInt 21159 sym1 := v.Aux 21160 mem := v.Args[2] 21161 v_0 := v.Args[0] 21162 if v_0.Op != OpAMD64LEAQ2 { 21163 break 21164 } 21165 off2 := v_0.AuxInt 21166 sym2 := v_0.Aux 21167 idx := v_0.Args[1] 21168 ptr := v_0.Args[0] 21169 val := v.Args[1] 21170 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 21171 break 21172 } 21173 v.reset(OpAMD64MOVWstoreidx2) 21174 v.AuxInt = off1 + off2 21175 v.Aux = mergeSym(sym1, sym2) 21176 v.AddArg(ptr) 21177 v.AddArg(idx) 21178 v.AddArg(val) 21179 v.AddArg(mem) 21180 return true 21181 } 21182 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 21183 // cond: ptr.Op != OpSB 21184 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 21185 for { 21186 off := v.AuxInt 21187 sym := v.Aux 21188 mem := v.Args[2] 21189 v_0 := v.Args[0] 21190 if v_0.Op != OpAMD64ADDQ { 21191 break 21192 } 21193 idx := v_0.Args[1] 21194 ptr := v_0.Args[0] 21195 val := v.Args[1] 21196 if !(ptr.Op != OpSB) { 21197 break 21198 } 21199 v.reset(OpAMD64MOVWstoreidx1) 21200 v.AuxInt = off 21201 v.Aux = sym 21202 v.AddArg(ptr) 21203 v.AddArg(idx) 21204 v.AddArg(val) 21205 v.AddArg(mem) 21206 return true 21207 } 21208 // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 21209 // cond: x.Uses == 1 && clobber(x) 21210 // result: (MOVLstore [i-2] {s} p w mem) 21211 for { 21212 i := v.AuxInt 21213 s := v.Aux 21214 _ = v.Args[2] 21215 p := v.Args[0] 21216 v_1 := v.Args[1] 21217 if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 16 { 21218 break 21219 } 21220 w := v_1.Args[0] 21221 x := v.Args[2] 21222 if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { 21223 break 21224 } 21225 mem := x.Args[2] 21226 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) { 21227 break 21228 } 21229 v.reset(OpAMD64MOVLstore) 21230 v.AuxInt = i - 2 21231 v.Aux = s 21232 v.AddArg(p) 21233 v.AddArg(w) 21234 v.AddArg(mem) 21235 return true 21236 } 21237 return false 21238 } 21239 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 21240 b := v.Block 21241 typ := &b.Func.Config.Types 21242 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 21243 // cond: x.Uses == 1 && clobber(x) 21244 // result: (MOVLstore [i-2] {s} p w mem) 21245 for { 21246 i := v.AuxInt 21247 s := v.Aux 21248 _ = v.Args[2] 21249 p := v.Args[0] 21250 v_1 := v.Args[1] 21251 if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 16 { 21252 break 21253 } 21254 w := v_1.Args[0] 21255 x := v.Args[2] 21256 if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { 21257 break 21258 } 21259 mem := x.Args[2] 21260 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) { 21261 break 21262 } 21263 v.reset(OpAMD64MOVLstore) 21264 v.AuxInt = i - 2 21265 v.Aux = s 21266 v.AddArg(p) 21267 v.AddArg(w) 21268 
v.AddArg(mem) 21269 return true 21270 } 21271 // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) 21272 // cond: x.Uses == 1 && clobber(x) 21273 // result: (MOVLstore [i-2] {s} p w0 mem) 21274 for { 21275 i := v.AuxInt 21276 s := v.Aux 21277 _ = v.Args[2] 21278 p := v.Args[0] 21279 v_1 := v.Args[1] 21280 if v_1.Op != OpAMD64SHRLconst { 21281 break 21282 } 21283 j := v_1.AuxInt 21284 w := v_1.Args[0] 21285 x := v.Args[2] 21286 if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { 21287 break 21288 } 21289 mem := x.Args[2] 21290 if p != x.Args[0] { 21291 break 21292 } 21293 w0 := x.Args[1] 21294 if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { 21295 break 21296 } 21297 v.reset(OpAMD64MOVLstore) 21298 v.AuxInt = i - 2 21299 v.Aux = s 21300 v.AddArg(p) 21301 v.AddArg(w0) 21302 v.AddArg(mem) 21303 return true 21304 } 21305 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 21306 // cond: x.Uses == 1 && clobber(x) 21307 // result: (MOVLstore [i-2] {s} p w0 mem) 21308 for { 21309 i := v.AuxInt 21310 s := v.Aux 21311 _ = v.Args[2] 21312 p := v.Args[0] 21313 v_1 := v.Args[1] 21314 if v_1.Op != OpAMD64SHRQconst { 21315 break 21316 } 21317 j := v_1.AuxInt 21318 w := v_1.Args[0] 21319 x := v.Args[2] 21320 if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { 21321 break 21322 } 21323 mem := x.Args[2] 21324 if p != x.Args[0] { 21325 break 21326 } 21327 w0 := x.Args[1] 21328 if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { 21329 break 21330 } 21331 v.reset(OpAMD64MOVLstore) 21332 v.AuxInt = i - 2 21333 v.Aux = s 21334 v.AddArg(p) 21335 v.AddArg(w0) 21336 v.AddArg(mem) 21337 return true 21338 } 21339 // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) 21340 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 21341 // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) 21342 for { 21343 i := v.AuxInt 21344 s := v.Aux 21345 _ = v.Args[2] 21346 p := v.Args[0] 21347 x1 := v.Args[1] 21348 if x1.Op != OpAMD64MOVWload { 21349 break 21350 } 21351 j := x1.AuxInt 21352 s2 := x1.Aux 21353 mem := x1.Args[1] 21354 p2 := x1.Args[0] 21355 mem2 := v.Args[2] 21356 if mem2.Op != OpAMD64MOVWstore || mem2.AuxInt != i-2 || mem2.Aux != s { 21357 break 21358 } 21359 _ = mem2.Args[2] 21360 if p != mem2.Args[0] { 21361 break 21362 } 21363 x2 := mem2.Args[1] 21364 if x2.Op != OpAMD64MOVWload || x2.AuxInt != j-2 || x2.Aux != s2 { 21365 break 21366 } 21367 _ = x2.Args[1] 21368 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 21369 break 21370 } 21371 v.reset(OpAMD64MOVLstore) 21372 v.AuxInt = i - 2 21373 v.Aux = s 21374 v.AddArg(p) 21375 v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32) 21376 v0.AuxInt = j - 2 21377 v0.Aux = s2 21378 v0.AddArg(p2) 21379 v0.AddArg(mem) 21380 v.AddArg(v0) 21381 v.AddArg(mem) 21382 return true 21383 } 21384 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 21385 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 21386 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 21387 for { 21388 off1 := v.AuxInt 21389 sym1 := v.Aux 21390 mem := v.Args[2] 21391 v_0 := v.Args[0] 21392 if 
func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool {
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		a := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		c := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || i != x.Args[1] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
	b := v.Block
	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(2*c)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		if !(ValAndOff(x).canAdd(2 * c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(2 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx2 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || i != x.Args[1] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 1
		v0.AddArg(i)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
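// Editorial note (hand-written, not generated): the MOVWstoreidx rules
// below merge a 16-bit store of the high half of a value with the adjacent
// 16-bit store of its low half into one 32-bit store. The store at [i]
// holds w>>16 (or w>>j paired with w>>(j-16)) and the store at [i-2] holds
// the lower half, so on little-endian x86 a single MOVLstoreidx1 at [i-2]
// writes the same four bytes. The identity being exploited, as a sketch:
//
//	binary.LittleEndian.PutUint16(b[0:], uint16(w))
//	binary.LittleEndian.PutUint16(b[2:], uint16(w>>16))
//	// writes the same bytes as:
//	binary.LittleEndian.PutUint32(b[0:], w)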
func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRLconst || v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRLconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVWstore [i+c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		mem := v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		if !(is32Bit(i + c)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i + c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
	b := v.Block
	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+2*d)
	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		mem := v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		if !(is32Bit(c + 2*d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRLconst || v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst || v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s {
			break
		}
		mem := x.Args[3]
		if p != x.Args[0] || idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+2*c)
	// result: (MOVWstore [i+2*c] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		mem := v.Args[3]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		w := v.Args[2]
		if !(is32Bit(i + 2*c)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i + 2*c
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
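// Editorial note (hand-written, not generated): from here the file moves
// on to multiplication. MULL/MULQ with one constant operand are first
// canonicalized to MULLconst/MULQconst (multiplication commutes, so both
// operand orders are matched); the *const helpers that follow then
// strength-reduce the constant multiply.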
func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool {
	// match: (MULL x (MOVLconst [c]))
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULL (MOVLconst [c]) x)
	// result: (MULLconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
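// Editorial note (hand-written, not generated): the MULLconst rules
// replace a multiply by a small constant with cheaper LEA/shift/negate
// sequences. LEAL1/2/4/8 compute x+y, x+2y, x+4y and x+8y, which gives
// identities such as (illustrative sketch):
//
//	x*3 = x + 2*x         // LEAL2 x x
//	x*5 = x + 4*x         // LEAL4 x x
//	x*9 = x + 8*x         // LEAL8 x x
//	x*7 = x + 2*(x + 2*x) // LEAL2 x (LEAL2 x x)
//
// Negative constants -3, -5 and -9 reuse the positive pattern under NEGL.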
func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool {
	b := v.Block
	// match: (MULLconst [c] (MULLconst [d] x))
	// result: (MULLconst [int64(int32(c * d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int64(int32(c * d))
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [-9] x)
	// result: (NEGL (LEAL8 <v.Type> x x))
	for {
		if v.AuxInt != -9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-5] x)
	// result: (NEGL (LEAL4 <v.Type> x x))
	for {
		if v.AuxInt != -5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-3] x)
	// result: (NEGL (LEAL2 <v.Type> x x))
	for {
		if v.AuxInt != -3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [-1] x)
	// result: (NEGL x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 0] _)
	// result: (MOVLconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (MULLconst [ 1] x)
	// result: x
	for {
		if v.AuxInt != 1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 3] x)
	// result: (LEAL2 x x)
	for {
		if v.AuxInt != 3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 5] x)
	// result: (LEAL4 x x)
	for {
		if v.AuxInt != 5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [ 7] x)
	// result: (LEAL2 x (LEAL2 <v.Type> x x))
	for {
		if v.AuxInt != 7 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool {
	b := v.Block
	// match: (MULLconst [ 9] x)
	// result: (LEAL8 x x)
	for {
		if v.AuxInt != 9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [11] x)
	// result: (LEAL2 x (LEAL4 <v.Type> x x))
	for {
		if v.AuxInt != 11 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [13] x)
	// result: (LEAL4 x (LEAL2 <v.Type> x x))
	for {
		if v.AuxInt != 13 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [19] x)
	// result: (LEAL2 x (LEAL8 <v.Type> x x))
	for {
		if v.AuxInt != 19 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [21] x)
	// result: (LEAL4 x (LEAL4 <v.Type> x x))
	for {
		if v.AuxInt != 21 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [25] x)
	// result: (LEAL8 x (LEAL2 <v.Type> x x))
	for {
		if v.AuxInt != 25 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [27] x)
	// result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
	for {
		if v.AuxInt != 27 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v1.AddArg(x)
		v1.AddArg(x)
		v.AddArg(v1)
		return true
	}
	// match: (MULLconst [37] x)
	// result: (LEAL4 x (LEAL8 <v.Type> x x))
	for {
		if v.AuxInt != 37 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [41] x)
	// result: (LEAL8 x (LEAL4 <v.Type> x x))
	for {
		if v.AuxInt != 41 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [45] x)
	// result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
	for {
		if v.AuxInt != 45 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v1.AddArg(x)
		v1.AddArg(x)
		v.AddArg(v1)
		return true
	}
	return false
}
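// Editorial note (hand-written, not generated): beyond the hard-coded
// constants, the next helper handles constants adjacent to a power of two
// or divisible by 3, 5 or 9 with a power-of-two quotient, for example:
//
//	x*15 = (x << 4) - x   // isPowerOfTwo(c+1)
//	x*17 = (x << 4) + x   // isPowerOfTwo(c-1), via LEAL1
//	x*24 = (x + 2*x) << 3 // c%3 == 0 && isPowerOfTwo(c/3)
//
// The c >= 15, 17, 34, 68, 136 guards appear to keep these generic rules
// from replacing the cheaper single-LEA forms matched above.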
func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool {
	b := v.Block
	// match: (MULLconst [73] x)
	// result: (LEAL8 x (LEAL8 <v.Type> x x))
	for {
		if v.AuxInt != 73 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [81] x)
	// result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
	for {
		if v.AuxInt != 81 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v1.AddArg(x)
		v1.AddArg(x)
		v.AddArg(v1)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo(c+1) && c >= 15
	// result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = log2(c + 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = log2(c - 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = log2(c - 2)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = log2(c - 4)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v0.AuxInt = log2(c - 8)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = log2(c / 3)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = log2(c / 5)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = log2(c / 9)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULLconst_30(v *Value) bool {
	// match: (MULLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [int64(int32(c*d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c * d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool {
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
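// Editorial note (hand-written, not generated): the MULQconst rules are
// the 64-bit mirror of the MULLconst ladder above. The extra is32Bit
// checks reflect that the x86-64 IMUL immediate form only takes a
// sign-extended 32-bit constant, so folds like c*d must stay within 32
// bits to remain representable.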
func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool {
	b := v.Block
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(c*d)
	// result: (MULQconst [c * d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c * d)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c * d
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-9] x)
	// result: (NEGQ (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != -9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-5] x)
	// result: (NEGQ (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != -5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-3] x)
	// result: (NEGQ (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != -3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-1] x)
	// result: (NEGQ x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [ 0] _)
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (MULQconst [ 1] x)
	// result: x
	for {
		if v.AuxInt != 1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [ 3] x)
	// result: (LEAQ2 x x)
	for {
		if v.AuxInt != 3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [ 5] x)
	// result: (LEAQ4 x x)
	for {
		if v.AuxInt != 5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [ 7] x)
	// result: (LEAQ2 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 7 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool {
	b := v.Block
	// match: (MULQconst [ 9] x)
	// result: (LEAQ8 x x)
	for {
		if v.AuxInt != 9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [11] x)
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 11 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [13] x)
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 13 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [19] x)
	// result: (LEAQ2 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 19 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [21] x)
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 21 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [25] x)
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 25 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [27] x)
	// result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 27 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v1.AddArg(x)
		v1.AddArg(x)
		v.AddArg(v1)
		return true
	}
	// match: (MULQconst [37] x)
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 37 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [41] x)
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 41 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [45] x)
	// result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 45 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v1.AddArg(x)
		v1.AddArg(x)
		v.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool {
	b := v.Block
	// match: (MULQconst [73] x)
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 73 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [81] x)
	// result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 81 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v1.AddArg(x)
		v1.AddArg(x)
		v.AddArg(v1)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c + 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 2)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 4)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 8)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 3)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 5)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 9)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool {
	// match: (MULQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [c*d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c * d
		return true
	}
	// match: (MULQconst [c] (NEGQ x))
	// cond: c != -(1<<31)
	// result: (MULQconst [-c] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
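// Editorial note (hand-written, not generated): for the SSE multiplies,
// the rules fold a MOVSDload/MOVSSload operand directly into the
// arithmetic instruction (MULSDload/MULSSload) whenever
// canMergeLoadClobber reports the load is only used here and may be
// clobbered; both operand orders are matched since multiplication
// commutes.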
func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool {
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSDload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSDload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (MULSDload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
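	// Editorial note (hand-written, not generated): the next rule is
	// store-to-load forwarding. When the memory operand of the folded load
	// is exactly the slot a MOVQstore just wrote, the load is skipped and
	// the stored integer bits are reinterpreted as a float via MOVQi2f,
	// the SSA analogue of math.Float64frombits(y).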
	// match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (MULSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool {
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSSload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSSload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (MULSSload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (MULSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
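// Editorial note (hand-written, not generated): the negation rules are
// plain algebra: -(-x) = x, -(x-y) = y-x (only when the SUB has a single
// use, so no extra value is kept alive), and negation of a constant folds
// at compile time, the 32-bit form truncating via int64(int32(-c)).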
func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
	// match: (NEGL (NEGL x))
	// result: x
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (NEGL s:(SUBL x y))
	// cond: s.Uses == 1
	// result: (SUBL y x)
	for {
		s := v.Args[0]
		if s.Op != OpAMD64SUBL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (NEGL (MOVLconst [c]))
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
	// match: (NEGQ (NEGQ x))
	// result: x
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (NEGQ s:(SUBQ x y))
	// cond: s.Uses == 1
	// result: (SUBQ y x)
	for {
		s := v.Args[0]
		if s.Op != OpAMD64SUBQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (NEGQ (MOVQconst [c]))
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
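// Editorial note (hand-written, not generated): ORL recognizes two
// idioms. x | (1<<y) becomes the BTSL bit-set instruction (BTSLconst for
// a constant power of two, restricted to values >= 128, presumably
// because smaller constants already fit an 8-bit OR immediate), and a
// complementary shift pair reassembles into a rotate:
//
//	(x << c) | (x >> (32 - c)) // == ROLLconst [c] x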
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (ORL x (MOVLconst [c]))
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTSLconst [log2uint32(c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (ORL x (MOVLconst [c]))
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// result: (ORLconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
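		// NOTE (annotation, not generated; intent inferred): the
		// SBBLcarrymask(CMPQconst [32] (NEGQ (ADDQconst [-32] (ANDQconst [31] y))))
		// subtree being walked here appears to be the mask the generic rotate
		// lowering emits for x<<(y&31) | x>>(-y&31): it is all ones when
		// y&31 != 0 and all zeros when y&31 == 0 (where the right-shift count
		// would wrap to 0 and contribute x instead of 0), so the OR of the two
		// shifts is exactly a rotate and the whole tree collapses to ROLL.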
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 31 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 31 || y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 31 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst ||
v_0_1_0_0_0.AuxInt != -32 { 24339 break 24340 } 24341 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24342 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 31 || y != v_0_1_0_0_0_0.Args[0] { 24343 break 24344 } 24345 v_1 := v.Args[1] 24346 if v_1.Op != OpAMD64SHLL { 24347 break 24348 } 24349 _ = v_1.Args[1] 24350 if x != v_1.Args[0] || y != v_1.Args[1] { 24351 break 24352 } 24353 v.reset(OpAMD64ROLL) 24354 v.AddArg(x) 24355 v.AddArg(y) 24356 return true 24357 } 24358 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y)) 24359 // result: (ROLL x y) 24360 for { 24361 _ = v.Args[1] 24362 v_0 := v.Args[0] 24363 if v_0.Op != OpAMD64ANDL { 24364 break 24365 } 24366 _ = v_0.Args[1] 24367 v_0_0 := v_0.Args[0] 24368 if v_0_0.Op != OpAMD64SBBLcarrymask { 24369 break 24370 } 24371 v_0_0_0 := v_0_0.Args[0] 24372 if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 32 { 24373 break 24374 } 24375 v_0_0_0_0 := v_0_0_0.Args[0] 24376 if v_0_0_0_0.Op != OpAMD64NEGL { 24377 break 24378 } 24379 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24380 if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -32 { 24381 break 24382 } 24383 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24384 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 31 { 24385 break 24386 } 24387 y := v_0_0_0_0_0_0.Args[0] 24388 v_0_1 := v_0.Args[1] 24389 if v_0_1.Op != OpAMD64SHRL { 24390 break 24391 } 24392 _ = v_0_1.Args[1] 24393 x := v_0_1.Args[0] 24394 v_0_1_1 := v_0_1.Args[1] 24395 if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] { 24396 break 24397 } 24398 v_1 := v.Args[1] 24399 if v_1.Op != OpAMD64SHLL { 24400 break 24401 } 24402 _ = v_1.Args[1] 24403 if x != v_1.Args[0] || y != v_1.Args[1] { 24404 break 24405 } 24406 v.reset(OpAMD64ROLL) 24407 v.AddArg(x) 24408 v.AddArg(y) 24409 return true 24410 } 24411 return false 24412 } 24413 func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool { 24414 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) 24415 // result: (RORL x y) 24416 for { 24417 _ = v.Args[1] 24418 v_0 := v.Args[0] 24419 if v_0.Op != OpAMD64SHRL { 24420 break 24421 } 24422 y := v_0.Args[1] 24423 x := v_0.Args[0] 24424 v_1 := v.Args[1] 24425 if v_1.Op != OpAMD64ANDL { 24426 break 24427 } 24428 _ = v_1.Args[1] 24429 v_1_0 := v_1.Args[0] 24430 if v_1_0.Op != OpAMD64SHLL { 24431 break 24432 } 24433 _ = v_1_0.Args[1] 24434 if x != v_1_0.Args[0] { 24435 break 24436 } 24437 v_1_0_1 := v_1_0.Args[1] 24438 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] { 24439 break 24440 } 24441 v_1_1 := v_1.Args[1] 24442 if v_1_1.Op != OpAMD64SBBLcarrymask { 24443 break 24444 } 24445 v_1_1_0 := v_1_1.Args[0] 24446 if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 32 { 24447 break 24448 } 24449 v_1_1_0_0 := v_1_1_0.Args[0] 24450 if v_1_1_0_0.Op != OpAMD64NEGQ { 24451 break 24452 } 24453 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24454 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -32 { 24455 break 24456 } 24457 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24458 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { 24459 break 24460 } 24461 v.reset(OpAMD64RORL) 24462 v.AddArg(x) 24463 v.AddArg(y) 24464 return true 24465 } 24466 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y)))) 24467 // result: (RORL x y) 24468 for { 24469 _ = v.Args[1] 24470 v_0 := 
v.Args[0] 24471 if v_0.Op != OpAMD64SHRL { 24472 break 24473 } 24474 y := v_0.Args[1] 24475 x := v_0.Args[0] 24476 v_1 := v.Args[1] 24477 if v_1.Op != OpAMD64ANDL { 24478 break 24479 } 24480 _ = v_1.Args[1] 24481 v_1_0 := v_1.Args[0] 24482 if v_1_0.Op != OpAMD64SBBLcarrymask { 24483 break 24484 } 24485 v_1_0_0 := v_1_0.Args[0] 24486 if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 32 { 24487 break 24488 } 24489 v_1_0_0_0 := v_1_0_0.Args[0] 24490 if v_1_0_0_0.Op != OpAMD64NEGQ { 24491 break 24492 } 24493 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 24494 if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -32 { 24495 break 24496 } 24497 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 24498 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 31 || y != v_1_0_0_0_0_0.Args[0] { 24499 break 24500 } 24501 v_1_1 := v_1.Args[1] 24502 if v_1_1.Op != OpAMD64SHLL { 24503 break 24504 } 24505 _ = v_1_1.Args[1] 24506 if x != v_1_1.Args[0] { 24507 break 24508 } 24509 v_1_1_1 := v_1_1.Args[1] 24510 if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] { 24511 break 24512 } 24513 v.reset(OpAMD64RORL) 24514 v.AddArg(x) 24515 v.AddArg(y) 24516 return true 24517 } 24518 // match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y)) 24519 // result: (RORL x y) 24520 for { 24521 _ = v.Args[1] 24522 v_0 := v.Args[0] 24523 if v_0.Op != OpAMD64ANDL { 24524 break 24525 } 24526 _ = v_0.Args[1] 24527 v_0_0 := v_0.Args[0] 24528 if v_0_0.Op != OpAMD64SHLL { 24529 break 24530 } 24531 _ = v_0_0.Args[1] 24532 x := v_0_0.Args[0] 24533 v_0_0_1 := v_0_0.Args[1] 24534 if v_0_0_1.Op != OpAMD64NEGQ { 24535 break 24536 } 24537 y := v_0_0_1.Args[0] 24538 v_0_1 := v_0.Args[1] 24539 if v_0_1.Op != OpAMD64SBBLcarrymask { 24540 break 24541 } 24542 v_0_1_0 := v_0_1.Args[0] 24543 if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 32 { 24544 break 24545 } 24546 v_0_1_0_0 := v_0_1_0.Args[0] 24547 if v_0_1_0_0.Op != OpAMD64NEGQ { 24548 break 24549 } 24550 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24551 if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -32 { 24552 break 24553 } 24554 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24555 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 31 || y != v_0_1_0_0_0_0.Args[0] { 24556 break 24557 } 24558 v_1 := v.Args[1] 24559 if v_1.Op != OpAMD64SHRL { 24560 break 24561 } 24562 _ = v_1.Args[1] 24563 if x != v_1.Args[0] || y != v_1.Args[1] { 24564 break 24565 } 24566 v.reset(OpAMD64RORL) 24567 v.AddArg(x) 24568 v.AddArg(y) 24569 return true 24570 } 24571 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y)) 24572 // result: (RORL x y) 24573 for { 24574 _ = v.Args[1] 24575 v_0 := v.Args[0] 24576 if v_0.Op != OpAMD64ANDL { 24577 break 24578 } 24579 _ = v_0.Args[1] 24580 v_0_0 := v_0.Args[0] 24581 if v_0_0.Op != OpAMD64SBBLcarrymask { 24582 break 24583 } 24584 v_0_0_0 := v_0_0.Args[0] 24585 if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 32 { 24586 break 24587 } 24588 v_0_0_0_0 := v_0_0_0.Args[0] 24589 if v_0_0_0_0.Op != OpAMD64NEGQ { 24590 break 24591 } 24592 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24593 if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -32 { 24594 break 24595 } 24596 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24597 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 31 { 24598 break 24599 } 24600 y := v_0_0_0_0_0_0.Args[0] 24601 v_0_1 := v_0.Args[1] 24602 if v_0_1.Op != OpAMD64SHLL { 24603 break 24604 
} 24605 _ = v_0_1.Args[1] 24606 x := v_0_1.Args[0] 24607 v_0_1_1 := v_0_1.Args[1] 24608 if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] { 24609 break 24610 } 24611 v_1 := v.Args[1] 24612 if v_1.Op != OpAMD64SHRL { 24613 break 24614 } 24615 _ = v_1.Args[1] 24616 if x != v_1.Args[0] || y != v_1.Args[1] { 24617 break 24618 } 24619 v.reset(OpAMD64RORL) 24620 v.AddArg(x) 24621 v.AddArg(y) 24622 return true 24623 } 24624 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) 24625 // result: (RORL x y) 24626 for { 24627 _ = v.Args[1] 24628 v_0 := v.Args[0] 24629 if v_0.Op != OpAMD64SHRL { 24630 break 24631 } 24632 y := v_0.Args[1] 24633 x := v_0.Args[0] 24634 v_1 := v.Args[1] 24635 if v_1.Op != OpAMD64ANDL { 24636 break 24637 } 24638 _ = v_1.Args[1] 24639 v_1_0 := v_1.Args[0] 24640 if v_1_0.Op != OpAMD64SHLL { 24641 break 24642 } 24643 _ = v_1_0.Args[1] 24644 if x != v_1_0.Args[0] { 24645 break 24646 } 24647 v_1_0_1 := v_1_0.Args[1] 24648 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] { 24649 break 24650 } 24651 v_1_1 := v_1.Args[1] 24652 if v_1_1.Op != OpAMD64SBBLcarrymask { 24653 break 24654 } 24655 v_1_1_0 := v_1_1.Args[0] 24656 if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 32 { 24657 break 24658 } 24659 v_1_1_0_0 := v_1_1_0.Args[0] 24660 if v_1_1_0_0.Op != OpAMD64NEGL { 24661 break 24662 } 24663 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24664 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -32 { 24665 break 24666 } 24667 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24668 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 31 || y != v_1_1_0_0_0_0.Args[0] { 24669 break 24670 } 24671 v.reset(OpAMD64RORL) 24672 v.AddArg(x) 24673 v.AddArg(y) 24674 return true 24675 } 24676 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y)))) 24677 // result: (RORL x y) 24678 for { 24679 _ = v.Args[1] 24680 v_0 := v.Args[0] 24681 if v_0.Op != OpAMD64SHRL { 24682 break 24683 } 24684 y := v_0.Args[1] 24685 x := v_0.Args[0] 24686 v_1 := v.Args[1] 24687 if v_1.Op != OpAMD64ANDL { 24688 break 24689 } 24690 _ = v_1.Args[1] 24691 v_1_0 := v_1.Args[0] 24692 if v_1_0.Op != OpAMD64SBBLcarrymask { 24693 break 24694 } 24695 v_1_0_0 := v_1_0.Args[0] 24696 if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 32 { 24697 break 24698 } 24699 v_1_0_0_0 := v_1_0_0.Args[0] 24700 if v_1_0_0_0.Op != OpAMD64NEGL { 24701 break 24702 } 24703 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 24704 if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -32 { 24705 break 24706 } 24707 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 24708 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 31 || y != v_1_0_0_0_0_0.Args[0] { 24709 break 24710 } 24711 v_1_1 := v_1.Args[1] 24712 if v_1_1.Op != OpAMD64SHLL { 24713 break 24714 } 24715 _ = v_1_1.Args[1] 24716 if x != v_1_1.Args[0] { 24717 break 24718 } 24719 v_1_1_1 := v_1_1.Args[1] 24720 if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] { 24721 break 24722 } 24723 v.reset(OpAMD64RORL) 24724 v.AddArg(x) 24725 v.AddArg(y) 24726 return true 24727 } 24728 // match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y)) 24729 // result: (RORL x y) 24730 for { 24731 _ = v.Args[1] 24732 v_0 := v.Args[0] 24733 if v_0.Op != OpAMD64ANDL { 24734 break 24735 } 24736 _ = v_0.Args[1] 24737 v_0_0 := v_0.Args[0] 24738 if v_0_0.Op != OpAMD64SHLL { 24739 break 24740 } 
24741 _ = v_0_0.Args[1] 24742 x := v_0_0.Args[0] 24743 v_0_0_1 := v_0_0.Args[1] 24744 if v_0_0_1.Op != OpAMD64NEGL { 24745 break 24746 } 24747 y := v_0_0_1.Args[0] 24748 v_0_1 := v_0.Args[1] 24749 if v_0_1.Op != OpAMD64SBBLcarrymask { 24750 break 24751 } 24752 v_0_1_0 := v_0_1.Args[0] 24753 if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 32 { 24754 break 24755 } 24756 v_0_1_0_0 := v_0_1_0.Args[0] 24757 if v_0_1_0_0.Op != OpAMD64NEGL { 24758 break 24759 } 24760 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24761 if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -32 { 24762 break 24763 } 24764 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24765 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 31 || y != v_0_1_0_0_0_0.Args[0] { 24766 break 24767 } 24768 v_1 := v.Args[1] 24769 if v_1.Op != OpAMD64SHRL { 24770 break 24771 } 24772 _ = v_1.Args[1] 24773 if x != v_1.Args[0] || y != v_1.Args[1] { 24774 break 24775 } 24776 v.reset(OpAMD64RORL) 24777 v.AddArg(x) 24778 v.AddArg(y) 24779 return true 24780 } 24781 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y)) 24782 // result: (RORL x y) 24783 for { 24784 _ = v.Args[1] 24785 v_0 := v.Args[0] 24786 if v_0.Op != OpAMD64ANDL { 24787 break 24788 } 24789 _ = v_0.Args[1] 24790 v_0_0 := v_0.Args[0] 24791 if v_0_0.Op != OpAMD64SBBLcarrymask { 24792 break 24793 } 24794 v_0_0_0 := v_0_0.Args[0] 24795 if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 32 { 24796 break 24797 } 24798 v_0_0_0_0 := v_0_0_0.Args[0] 24799 if v_0_0_0_0.Op != OpAMD64NEGL { 24800 break 24801 } 24802 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24803 if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -32 { 24804 break 24805 } 24806 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24807 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 31 { 24808 break 24809 } 24810 y := v_0_0_0_0_0_0.Args[0] 24811 v_0_1 := v_0.Args[1] 24812 if v_0_1.Op != OpAMD64SHLL { 24813 break 24814 } 24815 _ = v_0_1.Args[1] 24816 x := v_0_1.Args[0] 24817 v_0_1_1 := v_0_1.Args[1] 24818 if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] { 24819 break 24820 } 24821 v_1 := v.Args[1] 24822 if v_1.Op != OpAMD64SHRL { 24823 break 24824 } 24825 _ = v_1.Args[1] 24826 if x != v_1.Args[0] || y != v_1.Args[1] { 24827 break 24828 } 24829 v.reset(OpAMD64RORL) 24830 v.AddArg(x) 24831 v.AddArg(y) 24832 return true 24833 } 24834 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) 24835 // cond: v.Type.Size() == 2 24836 // result: (ROLW x y) 24837 for { 24838 _ = v.Args[1] 24839 v_0 := v.Args[0] 24840 if v_0.Op != OpAMD64SHLL { 24841 break 24842 } 24843 _ = v_0.Args[1] 24844 x := v_0.Args[0] 24845 v_0_1 := v_0.Args[1] 24846 if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { 24847 break 24848 } 24849 y := v_0_1.Args[0] 24850 v_1 := v.Args[1] 24851 if v_1.Op != OpAMD64ANDL { 24852 break 24853 } 24854 _ = v_1.Args[1] 24855 v_1_0 := v_1.Args[0] 24856 if v_1_0.Op != OpAMD64SHRW { 24857 break 24858 } 24859 _ = v_1_0.Args[1] 24860 if x != v_1_0.Args[0] { 24861 break 24862 } 24863 v_1_0_1 := v_1_0.Args[1] 24864 if v_1_0_1.Op != OpAMD64NEGQ { 24865 break 24866 } 24867 v_1_0_1_0 := v_1_0_1.Args[0] 24868 if v_1_0_1_0.Op != OpAMD64ADDQconst || v_1_0_1_0.AuxInt != -16 { 24869 break 24870 } 24871 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 24872 if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 15 || y != 
v_1_0_1_0_0.Args[0] { 24873 break 24874 } 24875 v_1_1 := v_1.Args[1] 24876 if v_1_1.Op != OpAMD64SBBLcarrymask { 24877 break 24878 } 24879 v_1_1_0 := v_1_1.Args[0] 24880 if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 16 { 24881 break 24882 } 24883 v_1_1_0_0 := v_1_1_0.Args[0] 24884 if v_1_1_0_0.Op != OpAMD64NEGQ { 24885 break 24886 } 24887 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24888 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -16 { 24889 break 24890 } 24891 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24892 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { 24893 break 24894 } 24895 v.reset(OpAMD64ROLW) 24896 v.AddArg(x) 24897 v.AddArg(y) 24898 return true 24899 } 24900 // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))) 24901 // cond: v.Type.Size() == 2 24902 // result: (ROLW x y) 24903 for { 24904 _ = v.Args[1] 24905 v_0 := v.Args[0] 24906 if v_0.Op != OpAMD64SHLL { 24907 break 24908 } 24909 _ = v_0.Args[1] 24910 x := v_0.Args[0] 24911 v_0_1 := v_0.Args[1] 24912 if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { 24913 break 24914 } 24915 y := v_0_1.Args[0] 24916 v_1 := v.Args[1] 24917 if v_1.Op != OpAMD64ANDL { 24918 break 24919 } 24920 _ = v_1.Args[1] 24921 v_1_0 := v_1.Args[0] 24922 if v_1_0.Op != OpAMD64SBBLcarrymask { 24923 break 24924 } 24925 v_1_0_0 := v_1_0.Args[0] 24926 if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 16 { 24927 break 24928 } 24929 v_1_0_0_0 := v_1_0_0.Args[0] 24930 if v_1_0_0_0.Op != OpAMD64NEGQ { 24931 break 24932 } 24933 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 24934 if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -16 { 24935 break 24936 } 24937 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 24938 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 15 || y != v_1_0_0_0_0_0.Args[0] { 24939 break 24940 } 24941 v_1_1 := v_1.Args[1] 24942 if v_1_1.Op != OpAMD64SHRW { 24943 break 24944 } 24945 _ = v_1_1.Args[1] 24946 if x != v_1_1.Args[0] { 24947 break 24948 } 24949 v_1_1_1 := v_1_1.Args[1] 24950 if v_1_1_1.Op != OpAMD64NEGQ { 24951 break 24952 } 24953 v_1_1_1_0 := v_1_1_1.Args[0] 24954 if v_1_1_1_0.Op != OpAMD64ADDQconst || v_1_1_1_0.AuxInt != -16 { 24955 break 24956 } 24957 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 24958 if v_1_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_1_0_0.AuxInt != 15 || y != v_1_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { 24959 break 24960 } 24961 v.reset(OpAMD64ROLW) 24962 v.AddArg(x) 24963 v.AddArg(y) 24964 return true 24965 } 24966 return false 24967 } 24968 func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool { 24969 // match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15]))) 24970 // cond: v.Type.Size() == 2 24971 // result: (ROLW x y) 24972 for { 24973 _ = v.Args[1] 24974 v_0 := v.Args[0] 24975 if v_0.Op != OpAMD64ANDL { 24976 break 24977 } 24978 _ = v_0.Args[1] 24979 v_0_0 := v_0.Args[0] 24980 if v_0_0.Op != OpAMD64SHRW { 24981 break 24982 } 24983 _ = v_0_0.Args[1] 24984 x := v_0_0.Args[0] 24985 v_0_0_1 := v_0_0.Args[1] 24986 if v_0_0_1.Op != OpAMD64NEGQ { 24987 break 24988 } 24989 v_0_0_1_0 := v_0_0_1.Args[0] 24990 if v_0_0_1_0.Op != OpAMD64ADDQconst || v_0_0_1_0.AuxInt != -16 { 24991 break 24992 } 24993 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 24994 if v_0_0_1_0_0.Op != OpAMD64ANDQconst || 
v_0_0_1_0_0.AuxInt != 15 { 24995 break 24996 } 24997 y := v_0_0_1_0_0.Args[0] 24998 v_0_1 := v_0.Args[1] 24999 if v_0_1.Op != OpAMD64SBBLcarrymask { 25000 break 25001 } 25002 v_0_1_0 := v_0_1.Args[0] 25003 if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 16 { 25004 break 25005 } 25006 v_0_1_0_0 := v_0_1_0.Args[0] 25007 if v_0_1_0_0.Op != OpAMD64NEGQ { 25008 break 25009 } 25010 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 25011 if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -16 { 25012 break 25013 } 25014 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 25015 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 15 || y != v_0_1_0_0_0_0.Args[0] { 25016 break 25017 } 25018 v_1 := v.Args[1] 25019 if v_1.Op != OpAMD64SHLL { 25020 break 25021 } 25022 _ = v_1.Args[1] 25023 if x != v_1.Args[0] { 25024 break 25025 } 25026 v_1_1 := v_1.Args[1] 25027 if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { 25028 break 25029 } 25030 v.reset(OpAMD64ROLW) 25031 v.AddArg(x) 25032 v.AddArg(y) 25033 return true 25034 } 25035 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15]))) 25036 // cond: v.Type.Size() == 2 25037 // result: (ROLW x y) 25038 for { 25039 _ = v.Args[1] 25040 v_0 := v.Args[0] 25041 if v_0.Op != OpAMD64ANDL { 25042 break 25043 } 25044 _ = v_0.Args[1] 25045 v_0_0 := v_0.Args[0] 25046 if v_0_0.Op != OpAMD64SBBLcarrymask { 25047 break 25048 } 25049 v_0_0_0 := v_0_0.Args[0] 25050 if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 16 { 25051 break 25052 } 25053 v_0_0_0_0 := v_0_0_0.Args[0] 25054 if v_0_0_0_0.Op != OpAMD64NEGQ { 25055 break 25056 } 25057 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 25058 if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -16 { 25059 break 25060 } 25061 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 25062 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 15 { 25063 break 25064 } 25065 y := v_0_0_0_0_0_0.Args[0] 25066 v_0_1 := v_0.Args[1] 25067 if v_0_1.Op != OpAMD64SHRW { 25068 break 25069 } 25070 _ = v_0_1.Args[1] 25071 x := v_0_1.Args[0] 25072 v_0_1_1 := v_0_1.Args[1] 25073 if v_0_1_1.Op != OpAMD64NEGQ { 25074 break 25075 } 25076 v_0_1_1_0 := v_0_1_1.Args[0] 25077 if v_0_1_1_0.Op != OpAMD64ADDQconst || v_0_1_1_0.AuxInt != -16 { 25078 break 25079 } 25080 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 25081 if v_0_1_1_0_0.Op != OpAMD64ANDQconst || v_0_1_1_0_0.AuxInt != 15 || y != v_0_1_1_0_0.Args[0] { 25082 break 25083 } 25084 v_1 := v.Args[1] 25085 if v_1.Op != OpAMD64SHLL { 25086 break 25087 } 25088 _ = v_1.Args[1] 25089 if x != v_1.Args[0] { 25090 break 25091 } 25092 v_1_1 := v_1.Args[1] 25093 if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { 25094 break 25095 } 25096 v.reset(OpAMD64ROLW) 25097 v.AddArg(x) 25098 v.AddArg(y) 25099 return true 25100 } 25101 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])))) 25102 // cond: v.Type.Size() == 2 25103 // result: (ROLW x y) 25104 for { 25105 _ = v.Args[1] 25106 v_0 := v.Args[0] 25107 if v_0.Op != OpAMD64SHLL { 25108 break 25109 } 25110 _ = v_0.Args[1] 25111 x := v_0.Args[0] 25112 v_0_1 := v_0.Args[1] 25113 if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { 25114 break 25115 } 25116 y := v_0_1.Args[0] 25117 v_1 := v.Args[1] 25118 if v_1.Op != OpAMD64ANDL { 
25119 break 25120 } 25121 _ = v_1.Args[1] 25122 v_1_0 := v_1.Args[0] 25123 if v_1_0.Op != OpAMD64SHRW { 25124 break 25125 } 25126 _ = v_1_0.Args[1] 25127 if x != v_1_0.Args[0] { 25128 break 25129 } 25130 v_1_0_1 := v_1_0.Args[1] 25131 if v_1_0_1.Op != OpAMD64NEGL { 25132 break 25133 } 25134 v_1_0_1_0 := v_1_0_1.Args[0] 25135 if v_1_0_1_0.Op != OpAMD64ADDLconst || v_1_0_1_0.AuxInt != -16 { 25136 break 25137 } 25138 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 25139 if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 15 || y != v_1_0_1_0_0.Args[0] { 25140 break 25141 } 25142 v_1_1 := v_1.Args[1] 25143 if v_1_1.Op != OpAMD64SBBLcarrymask { 25144 break 25145 } 25146 v_1_1_0 := v_1_1.Args[0] 25147 if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 16 { 25148 break 25149 } 25150 v_1_1_0_0 := v_1_1_0.Args[0] 25151 if v_1_1_0_0.Op != OpAMD64NEGL { 25152 break 25153 } 25154 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 25155 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -16 { 25156 break 25157 } 25158 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 25159 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { 25160 break 25161 } 25162 v.reset(OpAMD64ROLW) 25163 v.AddArg(x) 25164 v.AddArg(y) 25165 return true 25166 } 25167 // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))) 25168 // cond: v.Type.Size() == 2 25169 // result: (ROLW x y) 25170 for { 25171 _ = v.Args[1] 25172 v_0 := v.Args[0] 25173 if v_0.Op != OpAMD64SHLL { 25174 break 25175 } 25176 _ = v_0.Args[1] 25177 x := v_0.Args[0] 25178 v_0_1 := v_0.Args[1] 25179 if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { 25180 break 25181 } 25182 y := v_0_1.Args[0] 25183 v_1 := v.Args[1] 25184 if v_1.Op != OpAMD64ANDL { 25185 break 25186 } 25187 _ = v_1.Args[1] 25188 v_1_0 := v_1.Args[0] 25189 if v_1_0.Op != OpAMD64SBBLcarrymask { 25190 break 25191 } 25192 v_1_0_0 := v_1_0.Args[0] 25193 if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 16 { 25194 break 25195 } 25196 v_1_0_0_0 := v_1_0_0.Args[0] 25197 if v_1_0_0_0.Op != OpAMD64NEGL { 25198 break 25199 } 25200 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 25201 if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -16 { 25202 break 25203 } 25204 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 25205 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 15 || y != v_1_0_0_0_0_0.Args[0] { 25206 break 25207 } 25208 v_1_1 := v_1.Args[1] 25209 if v_1_1.Op != OpAMD64SHRW { 25210 break 25211 } 25212 _ = v_1_1.Args[1] 25213 if x != v_1_1.Args[0] { 25214 break 25215 } 25216 v_1_1_1 := v_1_1.Args[1] 25217 if v_1_1_1.Op != OpAMD64NEGL { 25218 break 25219 } 25220 v_1_1_1_0 := v_1_1_1.Args[0] 25221 if v_1_1_1_0.Op != OpAMD64ADDLconst || v_1_1_1_0.AuxInt != -16 { 25222 break 25223 } 25224 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 25225 if v_1_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_1_0_0.AuxInt != 15 || y != v_1_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { 25226 break 25227 } 25228 v.reset(OpAMD64ROLW) 25229 v.AddArg(x) 25230 v.AddArg(y) 25231 return true 25232 } 25233 // match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15]))) 25234 // cond: v.Type.Size() == 2 25235 // result: (ROLW x y) 25236 for { 25237 _ = v.Args[1] 25238 v_0 := v.Args[0] 25239 if v_0.Op != OpAMD64ANDL { 25240 break 25241 } 25242 _ = 
v_0.Args[1] 25243 v_0_0 := v_0.Args[0] 25244 if v_0_0.Op != OpAMD64SHRW { 25245 break 25246 } 25247 _ = v_0_0.Args[1] 25248 x := v_0_0.Args[0] 25249 v_0_0_1 := v_0_0.Args[1] 25250 if v_0_0_1.Op != OpAMD64NEGL { 25251 break 25252 } 25253 v_0_0_1_0 := v_0_0_1.Args[0] 25254 if v_0_0_1_0.Op != OpAMD64ADDLconst || v_0_0_1_0.AuxInt != -16 { 25255 break 25256 } 25257 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 25258 if v_0_0_1_0_0.Op != OpAMD64ANDLconst || v_0_0_1_0_0.AuxInt != 15 { 25259 break 25260 } 25261 y := v_0_0_1_0_0.Args[0] 25262 v_0_1 := v_0.Args[1] 25263 if v_0_1.Op != OpAMD64SBBLcarrymask { 25264 break 25265 } 25266 v_0_1_0 := v_0_1.Args[0] 25267 if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 16 { 25268 break 25269 } 25270 v_0_1_0_0 := v_0_1_0.Args[0] 25271 if v_0_1_0_0.Op != OpAMD64NEGL { 25272 break 25273 } 25274 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 25275 if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -16 { 25276 break 25277 } 25278 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 25279 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 15 || y != v_0_1_0_0_0_0.Args[0] { 25280 break 25281 } 25282 v_1 := v.Args[1] 25283 if v_1.Op != OpAMD64SHLL { 25284 break 25285 } 25286 _ = v_1.Args[1] 25287 if x != v_1.Args[0] { 25288 break 25289 } 25290 v_1_1 := v_1.Args[1] 25291 if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { 25292 break 25293 } 25294 v.reset(OpAMD64ROLW) 25295 v.AddArg(x) 25296 v.AddArg(y) 25297 return true 25298 } 25299 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15]))) 25300 // cond: v.Type.Size() == 2 25301 // result: (ROLW x y) 25302 for { 25303 _ = v.Args[1] 25304 v_0 := v.Args[0] 25305 if v_0.Op != OpAMD64ANDL { 25306 break 25307 } 25308 _ = v_0.Args[1] 25309 v_0_0 := v_0.Args[0] 25310 if v_0_0.Op != OpAMD64SBBLcarrymask { 25311 break 25312 } 25313 v_0_0_0 := v_0_0.Args[0] 25314 if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 16 { 25315 break 25316 } 25317 v_0_0_0_0 := v_0_0_0.Args[0] 25318 if v_0_0_0_0.Op != OpAMD64NEGL { 25319 break 25320 } 25321 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 25322 if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -16 { 25323 break 25324 } 25325 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 25326 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 15 { 25327 break 25328 } 25329 y := v_0_0_0_0_0_0.Args[0] 25330 v_0_1 := v_0.Args[1] 25331 if v_0_1.Op != OpAMD64SHRW { 25332 break 25333 } 25334 _ = v_0_1.Args[1] 25335 x := v_0_1.Args[0] 25336 v_0_1_1 := v_0_1.Args[1] 25337 if v_0_1_1.Op != OpAMD64NEGL { 25338 break 25339 } 25340 v_0_1_1_0 := v_0_1_1.Args[0] 25341 if v_0_1_1_0.Op != OpAMD64ADDLconst || v_0_1_1_0.AuxInt != -16 { 25342 break 25343 } 25344 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 25345 if v_0_1_1_0_0.Op != OpAMD64ANDLconst || v_0_1_1_0_0.AuxInt != 15 || y != v_0_1_1_0_0.Args[0] { 25346 break 25347 } 25348 v_1 := v.Args[1] 25349 if v_1.Op != OpAMD64SHLL { 25350 break 25351 } 25352 _ = v_1.Args[1] 25353 if x != v_1.Args[0] { 25354 break 25355 } 25356 v_1_1 := v_1.Args[1] 25357 if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { 25358 break 25359 } 25360 v.reset(OpAMD64ROLW) 25361 v.AddArg(x) 25362 v.AddArg(y) 25363 return true 25364 } 25365 // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) 25366 // cond: v.Type.Size() == 2 25367 // 
result: (RORW x y) 25368 for { 25369 _ = v.Args[1] 25370 v_0 := v.Args[0] 25371 if v_0.Op != OpAMD64SHRW { 25372 break 25373 } 25374 _ = v_0.Args[1] 25375 x := v_0.Args[0] 25376 v_0_1 := v_0.Args[1] 25377 if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 15 { 25378 break 25379 } 25380 y := v_0_1.Args[0] 25381 v_1 := v.Args[1] 25382 if v_1.Op != OpAMD64SHLL { 25383 break 25384 } 25385 _ = v_1.Args[1] 25386 if x != v_1.Args[0] { 25387 break 25388 } 25389 v_1_1 := v_1.Args[1] 25390 if v_1_1.Op != OpAMD64NEGQ { 25391 break 25392 } 25393 v_1_1_0 := v_1_1.Args[0] 25394 if v_1_1_0.Op != OpAMD64ADDQconst || v_1_1_0.AuxInt != -16 { 25395 break 25396 } 25397 v_1_1_0_0 := v_1_1_0.Args[0] 25398 if v_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0.AuxInt != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { 25399 break 25400 } 25401 v.reset(OpAMD64RORW) 25402 v.AddArg(x) 25403 v.AddArg(y) 25404 return true 25405 } 25406 // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15]))) 25407 // cond: v.Type.Size() == 2 25408 // result: (RORW x y) 25409 for { 25410 _ = v.Args[1] 25411 v_0 := v.Args[0] 25412 if v_0.Op != OpAMD64SHLL { 25413 break 25414 } 25415 _ = v_0.Args[1] 25416 x := v_0.Args[0] 25417 v_0_1 := v_0.Args[1] 25418 if v_0_1.Op != OpAMD64NEGQ { 25419 break 25420 } 25421 v_0_1_0 := v_0_1.Args[0] 25422 if v_0_1_0.Op != OpAMD64ADDQconst || v_0_1_0.AuxInt != -16 { 25423 break 25424 } 25425 v_0_1_0_0 := v_0_1_0.Args[0] 25426 if v_0_1_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0.AuxInt != 15 { 25427 break 25428 } 25429 y := v_0_1_0_0.Args[0] 25430 v_1 := v.Args[1] 25431 if v_1.Op != OpAMD64SHRW { 25432 break 25433 } 25434 _ = v_1.Args[1] 25435 if x != v_1.Args[0] { 25436 break 25437 } 25438 v_1_1 := v_1.Args[1] 25439 if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { 25440 break 25441 } 25442 v.reset(OpAMD64RORW) 25443 v.AddArg(x) 25444 v.AddArg(y) 25445 return true 25446 } 25447 // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) 25448 // cond: v.Type.Size() == 2 25449 // result: (RORW x y) 25450 for { 25451 _ = v.Args[1] 25452 v_0 := v.Args[0] 25453 if v_0.Op != OpAMD64SHRW { 25454 break 25455 } 25456 _ = v_0.Args[1] 25457 x := v_0.Args[0] 25458 v_0_1 := v_0.Args[1] 25459 if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 15 { 25460 break 25461 } 25462 y := v_0_1.Args[0] 25463 v_1 := v.Args[1] 25464 if v_1.Op != OpAMD64SHLL { 25465 break 25466 } 25467 _ = v_1.Args[1] 25468 if x != v_1.Args[0] { 25469 break 25470 } 25471 v_1_1 := v_1.Args[1] 25472 if v_1_1.Op != OpAMD64NEGL { 25473 break 25474 } 25475 v_1_1_0 := v_1_1.Args[0] 25476 if v_1_1_0.Op != OpAMD64ADDLconst || v_1_1_0.AuxInt != -16 { 25477 break 25478 } 25479 v_1_1_0_0 := v_1_1_0.Args[0] 25480 if v_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0.AuxInt != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { 25481 break 25482 } 25483 v.reset(OpAMD64RORW) 25484 v.AddArg(x) 25485 v.AddArg(y) 25486 return true 25487 } 25488 // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15]))) 25489 // cond: v.Type.Size() == 2 25490 // result: (RORW x y) 25491 for { 25492 _ = v.Args[1] 25493 v_0 := v.Args[0] 25494 if v_0.Op != OpAMD64SHLL { 25495 break 25496 } 25497 _ = v_0.Args[1] 25498 x := v_0.Args[0] 25499 v_0_1 := v_0.Args[1] 25500 if v_0_1.Op != OpAMD64NEGL { 25501 break 25502 } 25503 v_0_1_0 := v_0_1.Args[0] 25504 if v_0_1_0.Op != OpAMD64ADDLconst || v_0_1_0.AuxInt != -16 { 25505 break 
25506 } 25507 v_0_1_0_0 := v_0_1_0.Args[0] 25508 if v_0_1_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0.AuxInt != 15 { 25509 break 25510 } 25511 y := v_0_1_0_0.Args[0] 25512 v_1 := v.Args[1] 25513 if v_1.Op != OpAMD64SHRW { 25514 break 25515 } 25516 _ = v_1.Args[1] 25517 if x != v_1.Args[0] { 25518 break 25519 } 25520 v_1_1 := v_1.Args[1] 25521 if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 15 || y != v_1_1.Args[0] || !(v.Type.Size() == 2) { 25522 break 25523 } 25524 v.reset(OpAMD64RORW) 25525 v.AddArg(x) 25526 v.AddArg(y) 25527 return true 25528 } 25529 return false 25530 } 25531 func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool { 25532 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])))) 25533 // cond: v.Type.Size() == 1 25534 // result: (ROLB x y) 25535 for { 25536 _ = v.Args[1] 25537 v_0 := v.Args[0] 25538 if v_0.Op != OpAMD64SHLL { 25539 break 25540 } 25541 _ = v_0.Args[1] 25542 x := v_0.Args[0] 25543 v_0_1 := v_0.Args[1] 25544 if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 { 25545 break 25546 } 25547 y := v_0_1.Args[0] 25548 v_1 := v.Args[1] 25549 if v_1.Op != OpAMD64ANDL { 25550 break 25551 } 25552 _ = v_1.Args[1] 25553 v_1_0 := v_1.Args[0] 25554 if v_1_0.Op != OpAMD64SHRB { 25555 break 25556 } 25557 _ = v_1_0.Args[1] 25558 if x != v_1_0.Args[0] { 25559 break 25560 } 25561 v_1_0_1 := v_1_0.Args[1] 25562 if v_1_0_1.Op != OpAMD64NEGQ { 25563 break 25564 } 25565 v_1_0_1_0 := v_1_0_1.Args[0] 25566 if v_1_0_1_0.Op != OpAMD64ADDQconst || v_1_0_1_0.AuxInt != -8 { 25567 break 25568 } 25569 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 25570 if v_1_0_1_0_0.Op != OpAMD64ANDQconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] { 25571 break 25572 } 25573 v_1_1 := v_1.Args[1] 25574 if v_1_1.Op != OpAMD64SBBLcarrymask { 25575 break 25576 } 25577 v_1_1_0 := v_1_1.Args[0] 25578 if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 8 { 25579 break 25580 } 25581 v_1_1_0_0 := v_1_1_0.Args[0] 25582 if v_1_1_0_0.Op != OpAMD64NEGQ { 25583 break 25584 } 25585 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 25586 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -8 { 25587 break 25588 } 25589 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 25590 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { 25591 break 25592 } 25593 v.reset(OpAMD64ROLB) 25594 v.AddArg(x) 25595 v.AddArg(y) 25596 return true 25597 } 25598 // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))) 25599 // cond: v.Type.Size() == 1 25600 // result: (ROLB x y) 25601 for { 25602 _ = v.Args[1] 25603 v_0 := v.Args[0] 25604 if v_0.Op != OpAMD64SHLL { 25605 break 25606 } 25607 _ = v_0.Args[1] 25608 x := v_0.Args[0] 25609 v_0_1 := v_0.Args[1] 25610 if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 { 25611 break 25612 } 25613 y := v_0_1.Args[0] 25614 v_1 := v.Args[1] 25615 if v_1.Op != OpAMD64ANDL { 25616 break 25617 } 25618 _ = v_1.Args[1] 25619 v_1_0 := v_1.Args[0] 25620 if v_1_0.Op != OpAMD64SBBLcarrymask { 25621 break 25622 } 25623 v_1_0_0 := v_1_0.Args[0] 25624 if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 8 { 25625 break 25626 } 25627 v_1_0_0_0 := v_1_0_0.Args[0] 25628 if v_1_0_0_0.Op != OpAMD64NEGQ { 25629 break 25630 } 25631 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 25632 if v_1_0_0_0_0.Op != OpAMD64ADDQconst || 
v_1_0_0_0_0.AuxInt != -8 { 25633 break 25634 } 25635 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 25636 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 7 || y != v_1_0_0_0_0_0.Args[0] { 25637 break 25638 } 25639 v_1_1 := v_1.Args[1] 25640 if v_1_1.Op != OpAMD64SHRB { 25641 break 25642 } 25643 _ = v_1_1.Args[1] 25644 if x != v_1_1.Args[0] { 25645 break 25646 } 25647 v_1_1_1 := v_1_1.Args[1] 25648 if v_1_1_1.Op != OpAMD64NEGQ { 25649 break 25650 } 25651 v_1_1_1_0 := v_1_1_1.Args[0] 25652 if v_1_1_1_0.Op != OpAMD64ADDQconst || v_1_1_1_0.AuxInt != -8 { 25653 break 25654 } 25655 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 25656 if v_1_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_1_0_0.AuxInt != 7 || y != v_1_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { 25657 break 25658 } 25659 v.reset(OpAMD64ROLB) 25660 v.AddArg(x) 25661 v.AddArg(y) 25662 return true 25663 } 25664 // match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7]))) 25665 // cond: v.Type.Size() == 1 25666 // result: (ROLB x y) 25667 for { 25668 _ = v.Args[1] 25669 v_0 := v.Args[0] 25670 if v_0.Op != OpAMD64ANDL { 25671 break 25672 } 25673 _ = v_0.Args[1] 25674 v_0_0 := v_0.Args[0] 25675 if v_0_0.Op != OpAMD64SHRB { 25676 break 25677 } 25678 _ = v_0_0.Args[1] 25679 x := v_0_0.Args[0] 25680 v_0_0_1 := v_0_0.Args[1] 25681 if v_0_0_1.Op != OpAMD64NEGQ { 25682 break 25683 } 25684 v_0_0_1_0 := v_0_0_1.Args[0] 25685 if v_0_0_1_0.Op != OpAMD64ADDQconst || v_0_0_1_0.AuxInt != -8 { 25686 break 25687 } 25688 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 25689 if v_0_0_1_0_0.Op != OpAMD64ANDQconst || v_0_0_1_0_0.AuxInt != 7 { 25690 break 25691 } 25692 y := v_0_0_1_0_0.Args[0] 25693 v_0_1 := v_0.Args[1] 25694 if v_0_1.Op != OpAMD64SBBLcarrymask { 25695 break 25696 } 25697 v_0_1_0 := v_0_1.Args[0] 25698 if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 8 { 25699 break 25700 } 25701 v_0_1_0_0 := v_0_1_0.Args[0] 25702 if v_0_1_0_0.Op != OpAMD64NEGQ { 25703 break 25704 } 25705 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 25706 if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -8 { 25707 break 25708 } 25709 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 25710 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 7 || y != v_0_1_0_0_0_0.Args[0] { 25711 break 25712 } 25713 v_1 := v.Args[1] 25714 if v_1.Op != OpAMD64SHLL { 25715 break 25716 } 25717 _ = v_1.Args[1] 25718 if x != v_1.Args[0] { 25719 break 25720 } 25721 v_1_1 := v_1.Args[1] 25722 if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) { 25723 break 25724 } 25725 v.reset(OpAMD64ROLB) 25726 v.AddArg(x) 25727 v.AddArg(y) 25728 return true 25729 } 25730 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7]))) 25731 // cond: v.Type.Size() == 1 25732 // result: (ROLB x y) 25733 for { 25734 _ = v.Args[1] 25735 v_0 := v.Args[0] 25736 if v_0.Op != OpAMD64ANDL { 25737 break 25738 } 25739 _ = v_0.Args[1] 25740 v_0_0 := v_0.Args[0] 25741 if v_0_0.Op != OpAMD64SBBLcarrymask { 25742 break 25743 } 25744 v_0_0_0 := v_0_0.Args[0] 25745 if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 8 { 25746 break 25747 } 25748 v_0_0_0_0 := v_0_0_0.Args[0] 25749 if v_0_0_0_0.Op != OpAMD64NEGQ { 25750 break 25751 } 25752 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 25753 if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -8 { 25754 break 25755 } 
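		// NOTE (annotation, not generated): from here through
		// rewriteValueAMD64_OpAMD64ORL_50, the same shift/mask shape recurs
		// with the constants 7, -8, and 8 and a v.Type.Size() == 1 guard;
		// these are the 8-bit variants of the rotate idiom, collapsed to
		// ROLB/RORB instead of ROLL/RORL.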
25756 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 25757 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 7 { 25758 break 25759 } 25760 y := v_0_0_0_0_0_0.Args[0] 25761 v_0_1 := v_0.Args[1] 25762 if v_0_1.Op != OpAMD64SHRB { 25763 break 25764 } 25765 _ = v_0_1.Args[1] 25766 x := v_0_1.Args[0] 25767 v_0_1_1 := v_0_1.Args[1] 25768 if v_0_1_1.Op != OpAMD64NEGQ { 25769 break 25770 } 25771 v_0_1_1_0 := v_0_1_1.Args[0] 25772 if v_0_1_1_0.Op != OpAMD64ADDQconst || v_0_1_1_0.AuxInt != -8 { 25773 break 25774 } 25775 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 25776 if v_0_1_1_0_0.Op != OpAMD64ANDQconst || v_0_1_1_0_0.AuxInt != 7 || y != v_0_1_1_0_0.Args[0] { 25777 break 25778 } 25779 v_1 := v.Args[1] 25780 if v_1.Op != OpAMD64SHLL { 25781 break 25782 } 25783 _ = v_1.Args[1] 25784 if x != v_1.Args[0] { 25785 break 25786 } 25787 v_1_1 := v_1.Args[1] 25788 if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) { 25789 break 25790 } 25791 v.reset(OpAMD64ROLB) 25792 v.AddArg(x) 25793 v.AddArg(y) 25794 return true 25795 } 25796 // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])))) 25797 // cond: v.Type.Size() == 1 25798 // result: (ROLB x y) 25799 for { 25800 _ = v.Args[1] 25801 v_0 := v.Args[0] 25802 if v_0.Op != OpAMD64SHLL { 25803 break 25804 } 25805 _ = v_0.Args[1] 25806 x := v_0.Args[0] 25807 v_0_1 := v_0.Args[1] 25808 if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { 25809 break 25810 } 25811 y := v_0_1.Args[0] 25812 v_1 := v.Args[1] 25813 if v_1.Op != OpAMD64ANDL { 25814 break 25815 } 25816 _ = v_1.Args[1] 25817 v_1_0 := v_1.Args[0] 25818 if v_1_0.Op != OpAMD64SHRB { 25819 break 25820 } 25821 _ = v_1_0.Args[1] 25822 if x != v_1_0.Args[0] { 25823 break 25824 } 25825 v_1_0_1 := v_1_0.Args[1] 25826 if v_1_0_1.Op != OpAMD64NEGL { 25827 break 25828 } 25829 v_1_0_1_0 := v_1_0_1.Args[0] 25830 if v_1_0_1_0.Op != OpAMD64ADDLconst || v_1_0_1_0.AuxInt != -8 { 25831 break 25832 } 25833 v_1_0_1_0_0 := v_1_0_1_0.Args[0] 25834 if v_1_0_1_0_0.Op != OpAMD64ANDLconst || v_1_0_1_0_0.AuxInt != 7 || y != v_1_0_1_0_0.Args[0] { 25835 break 25836 } 25837 v_1_1 := v_1.Args[1] 25838 if v_1_1.Op != OpAMD64SBBLcarrymask { 25839 break 25840 } 25841 v_1_1_0 := v_1_1.Args[0] 25842 if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 8 { 25843 break 25844 } 25845 v_1_1_0_0 := v_1_1_0.Args[0] 25846 if v_1_1_0_0.Op != OpAMD64NEGL { 25847 break 25848 } 25849 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 25850 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -8 { 25851 break 25852 } 25853 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 25854 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { 25855 break 25856 } 25857 v.reset(OpAMD64ROLB) 25858 v.AddArg(x) 25859 v.AddArg(y) 25860 return true 25861 } 25862 // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))) 25863 // cond: v.Type.Size() == 1 25864 // result: (ROLB x y) 25865 for { 25866 _ = v.Args[1] 25867 v_0 := v.Args[0] 25868 if v_0.Op != OpAMD64SHLL { 25869 break 25870 } 25871 _ = v_0.Args[1] 25872 x := v_0.Args[0] 25873 v_0_1 := v_0.Args[1] 25874 if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 { 25875 break 25876 } 25877 y := v_0_1.Args[0] 25878 v_1 := v.Args[1] 25879 if v_1.Op != OpAMD64ANDL { 25880 break 
25881 } 25882 _ = v_1.Args[1] 25883 v_1_0 := v_1.Args[0] 25884 if v_1_0.Op != OpAMD64SBBLcarrymask { 25885 break 25886 } 25887 v_1_0_0 := v_1_0.Args[0] 25888 if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 8 { 25889 break 25890 } 25891 v_1_0_0_0 := v_1_0_0.Args[0] 25892 if v_1_0_0_0.Op != OpAMD64NEGL { 25893 break 25894 } 25895 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 25896 if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -8 { 25897 break 25898 } 25899 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 25900 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 7 || y != v_1_0_0_0_0_0.Args[0] { 25901 break 25902 } 25903 v_1_1 := v_1.Args[1] 25904 if v_1_1.Op != OpAMD64SHRB { 25905 break 25906 } 25907 _ = v_1_1.Args[1] 25908 if x != v_1_1.Args[0] { 25909 break 25910 } 25911 v_1_1_1 := v_1_1.Args[1] 25912 if v_1_1_1.Op != OpAMD64NEGL { 25913 break 25914 } 25915 v_1_1_1_0 := v_1_1_1.Args[0] 25916 if v_1_1_1_0.Op != OpAMD64ADDLconst || v_1_1_1_0.AuxInt != -8 { 25917 break 25918 } 25919 v_1_1_1_0_0 := v_1_1_1_0.Args[0] 25920 if v_1_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_1_0_0.AuxInt != 7 || y != v_1_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { 25921 break 25922 } 25923 v.reset(OpAMD64ROLB) 25924 v.AddArg(x) 25925 v.AddArg(y) 25926 return true 25927 } 25928 // match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7]))) 25929 // cond: v.Type.Size() == 1 25930 // result: (ROLB x y) 25931 for { 25932 _ = v.Args[1] 25933 v_0 := v.Args[0] 25934 if v_0.Op != OpAMD64ANDL { 25935 break 25936 } 25937 _ = v_0.Args[1] 25938 v_0_0 := v_0.Args[0] 25939 if v_0_0.Op != OpAMD64SHRB { 25940 break 25941 } 25942 _ = v_0_0.Args[1] 25943 x := v_0_0.Args[0] 25944 v_0_0_1 := v_0_0.Args[1] 25945 if v_0_0_1.Op != OpAMD64NEGL { 25946 break 25947 } 25948 v_0_0_1_0 := v_0_0_1.Args[0] 25949 if v_0_0_1_0.Op != OpAMD64ADDLconst || v_0_0_1_0.AuxInt != -8 { 25950 break 25951 } 25952 v_0_0_1_0_0 := v_0_0_1_0.Args[0] 25953 if v_0_0_1_0_0.Op != OpAMD64ANDLconst || v_0_0_1_0_0.AuxInt != 7 { 25954 break 25955 } 25956 y := v_0_0_1_0_0.Args[0] 25957 v_0_1 := v_0.Args[1] 25958 if v_0_1.Op != OpAMD64SBBLcarrymask { 25959 break 25960 } 25961 v_0_1_0 := v_0_1.Args[0] 25962 if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 8 { 25963 break 25964 } 25965 v_0_1_0_0 := v_0_1_0.Args[0] 25966 if v_0_1_0_0.Op != OpAMD64NEGL { 25967 break 25968 } 25969 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 25970 if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -8 { 25971 break 25972 } 25973 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 25974 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 7 || y != v_0_1_0_0_0_0.Args[0] { 25975 break 25976 } 25977 v_1 := v.Args[1] 25978 if v_1.Op != OpAMD64SHLL { 25979 break 25980 } 25981 _ = v_1.Args[1] 25982 if x != v_1.Args[0] { 25983 break 25984 } 25985 v_1_1 := v_1.Args[1] 25986 if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) { 25987 break 25988 } 25989 v.reset(OpAMD64ROLB) 25990 v.AddArg(x) 25991 v.AddArg(y) 25992 return true 25993 } 25994 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7]))) 25995 // cond: v.Type.Size() == 1 25996 // result: (ROLB x y) 25997 for { 25998 _ = v.Args[1] 25999 v_0 := v.Args[0] 26000 if v_0.Op != OpAMD64ANDL { 26001 break 26002 } 26003 _ = v_0.Args[1] 26004 v_0_0 := 
v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst || v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst || v_0_1_1_0_0.AuxInt != 7 || y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst || v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst || v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0.AuxInt != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst || v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
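// The ROLB/RORB rules above and in the next function recognize the
// strength-reduced form of a rotate on a byte-sized value: a left shift by a
// masked count OR-ed with a right shift by (8 - count), with the
// SBBLcarrymask/CMPLconst subtree masking out the out-of-range right shift
// when the count is zero. A minimal Go-level sketch of the idiom being
// matched (illustrative only — the exact SSA shape depends on earlier
// lowering, and the helper name rotl8 is ours, not the compiler's):
//
//	func rotl8(x uint8, k uint) uint8 {
//		// k&7 masks the rotate count. When k&7 == 0, the right shift is
//		// by 8, which Go defines to be 0 for an unsigned uint8 operand,
//		// so the OR still yields x — exactly what a hardware ROLB gives.
//		return x<<(k&7) | x>>(8-k&7)
//	}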
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst || v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst || v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0.AuxInt != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst || v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst || v_1_1.AuxInt != 7 || y != v_1_1.Args[0] || !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL x x)
	// result: x
	for {
		x := v.Args[1]
		if x != v.Args[0] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
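	// From here on, the ORL rules in this and the following functions (_50
	// through _90) perform little-endian load merging: two adjacent narrow
	// loads, one shifted and OR-ed with the other, are replaced by a single
	// wider load at a block chosen by mergePoint, with the loadidx1 variants
	// covering every operand ordering of indexed loads (p+idx and idx+p). A
	// sketch of the source-level pattern this corresponds to (illustrative;
	// the matcher additionally requires the intermediate values to be
	// single-use and clobberable):
	//
	//	func le16(b []byte) uint16 {
	//		// Two adjacent byte loads plus shift/OR become one 16-bit
	//		// load (e.g. a MOVWload) once these rules fire.
	//		return uint16(b[0]) | uint16(b[1])<<8
	//	}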
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil &&
clobber(x0) && clobber(x1) && clobber(sh)) { 26385 break 26386 } 26387 b = mergePoint(b, x0, x1) 26388 v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 26389 v.reset(OpCopy) 26390 v.AddArg(v0) 26391 v0.AuxInt = i0 26392 v0.Aux = s 26393 v0.AddArg(p) 26394 v0.AddArg(mem) 26395 return true 26396 } 26397 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 26398 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26399 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 26400 for { 26401 _ = v.Args[1] 26402 s1 := v.Args[0] 26403 if s1.Op != OpAMD64SHLLconst { 26404 break 26405 } 26406 j1 := s1.AuxInt 26407 x1 := s1.Args[0] 26408 if x1.Op != OpAMD64MOVBload { 26409 break 26410 } 26411 i1 := x1.AuxInt 26412 s := x1.Aux 26413 mem := x1.Args[1] 26414 p := x1.Args[0] 26415 or := v.Args[1] 26416 if or.Op != OpAMD64ORL { 26417 break 26418 } 26419 y := or.Args[1] 26420 s0 := or.Args[0] 26421 if s0.Op != OpAMD64SHLLconst { 26422 break 26423 } 26424 j0 := s0.AuxInt 26425 x0 := s0.Args[0] 26426 if x0.Op != OpAMD64MOVBload { 26427 break 26428 } 26429 i0 := x0.AuxInt 26430 if x0.Aux != s { 26431 break 26432 } 26433 _ = x0.Args[1] 26434 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26435 break 26436 } 26437 b = mergePoint(b, x0, x1, y) 26438 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) 26439 v.reset(OpCopy) 26440 v.AddArg(v0) 26441 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) 26442 v1.AuxInt = j0 26443 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 26444 v2.AuxInt = i0 26445 v2.Aux = s 26446 v2.AddArg(p) 26447 v2.AddArg(mem) 26448 v1.AddArg(v2) 26449 v0.AddArg(v1) 26450 v0.AddArg(y) 26451 return true 26452 } 26453 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))) 26454 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26455 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 26456 for { 26457 _ = v.Args[1] 26458 s1 := v.Args[0] 26459 if s1.Op != OpAMD64SHLLconst { 26460 break 26461 } 26462 j1 := s1.AuxInt 26463 x1 := s1.Args[0] 26464 if x1.Op != OpAMD64MOVBload { 26465 break 26466 } 26467 i1 := x1.AuxInt 26468 s := x1.Aux 26469 mem := x1.Args[1] 26470 p := x1.Args[0] 26471 or := v.Args[1] 26472 if or.Op != OpAMD64ORL { 26473 break 26474 } 26475 _ = or.Args[1] 26476 y := or.Args[0] 26477 s0 := or.Args[1] 26478 if s0.Op != OpAMD64SHLLconst { 26479 break 26480 } 26481 j0 := s0.AuxInt 26482 x0 := s0.Args[0] 26483 if x0.Op != OpAMD64MOVBload { 26484 break 26485 } 26486 i0 := x0.AuxInt 26487 if x0.Aux != s { 26488 break 26489 } 26490 _ = x0.Args[1] 26491 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26492 break 26493 } 26494 b = mergePoint(b, x0, x1, y) 26495 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) 26496 v.reset(OpCopy) 26497 v.AddArg(v0) 26498 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) 26499 v1.AuxInt = j0 26500 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 26501 v2.AuxInt = i0 26502 v2.Aux = s 26503 v2.AddArg(p) 26504 v2.AddArg(mem) 26505 v1.AddArg(v2) 26506 v0.AddArg(v1) 26507 v0.AddArg(y) 26508 return true 26509 } 26510 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 26511 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26512 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 26513 for { 26514 _ = v.Args[1] 26515 or := v.Args[0] 26516 if or.Op != OpAMD64ORL { 26517 break 26518 } 26519 y := or.Args[1] 26520 s0 := or.Args[0] 26521 if s0.Op != OpAMD64SHLLconst { 26522 break 26523 } 26524 j0 := s0.AuxInt 26525 x0 := s0.Args[0] 26526 if x0.Op != OpAMD64MOVBload { 26527 break 26528 } 26529 i0 := x0.AuxInt 26530 s := x0.Aux 26531 mem := x0.Args[1] 26532 p := x0.Args[0] 26533 s1 := v.Args[1] 26534 if s1.Op != OpAMD64SHLLconst { 26535 break 26536 } 26537 j1 := s1.AuxInt 26538 x1 := s1.Args[0] 26539 if x1.Op != OpAMD64MOVBload { 26540 break 26541 } 26542 i1 := x1.AuxInt 26543 if x1.Aux != s { 26544 break 26545 } 26546 _ = x1.Args[1] 26547 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26548 break 26549 } 26550 b = mergePoint(b, x0, x1, y) 26551 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) 26552 v.reset(OpCopy) 26553 v.AddArg(v0) 26554 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) 26555 v1.AuxInt = j0 26556 v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 26557 v2.AuxInt = i0 26558 v2.Aux = s 26559 v2.AddArg(p) 26560 v2.AddArg(mem) 26561 v1.AddArg(v2) 26562 v0.AddArg(v1) 26563 v0.AddArg(y) 26564 return true 26565 } 26566 return false 26567 } 26568 func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool { 26569 b := v.Block 26570 typ := &b.Func.Config.Types 26571 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 26572 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26573 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 26574 for { 26575 _ = v.Args[1] 26576 or := v.Args[0] 26577 if or.Op != OpAMD64ORL { 26578 break 26579 } 26580 _ = or.Args[1] 26581 y := or.Args[0] 26582 s0 := or.Args[1] 26583 if s0.Op != OpAMD64SHLLconst { 26584 break 26585 } 26586 j0 := s0.AuxInt 26587 x0 := s0.Args[0] 26588 if x0.Op != OpAMD64MOVBload { 26589 break 26590 } 26591 i0 := x0.AuxInt 26592 s := x0.Aux 26593 mem := x0.Args[1] 26594 p := x0.Args[0] 26595 s1 := v.Args[1] 26596 if s1.Op != OpAMD64SHLLconst { 26597 break 26598 } 26599 j1 := s1.AuxInt 26600 x1 := 
s1.Args[0] 26601 if x1.Op != OpAMD64MOVBload { 26602 break 26603 } 26604 i1 := x1.AuxInt 26605 if x1.Aux != s { 26606 break 26607 } 26608 _ = x1.Args[1] 26609 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26610 break 26611 } 26612 b = mergePoint(b, x0, x1, y) 26613 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) 26614 v.reset(OpCopy) 26615 v.AddArg(v0) 26616 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) 26617 v1.AuxInt = j0 26618 v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 26619 v2.AuxInt = i0 26620 v2.Aux = s 26621 v2.AddArg(p) 26622 v2.AddArg(mem) 26623 v1.AddArg(v2) 26624 v0.AddArg(v1) 26625 v0.AddArg(y) 26626 return true 26627 } 26628 // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 26629 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26630 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 26631 for { 26632 _ = v.Args[1] 26633 x0 := v.Args[0] 26634 if x0.Op != OpAMD64MOVBloadidx1 { 26635 break 26636 } 26637 i0 := x0.AuxInt 26638 s := x0.Aux 26639 mem := x0.Args[2] 26640 p := x0.Args[0] 26641 idx := x0.Args[1] 26642 sh := v.Args[1] 26643 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 26644 break 26645 } 26646 x1 := sh.Args[0] 26647 if x1.Op != OpAMD64MOVBloadidx1 { 26648 break 26649 } 26650 i1 := x1.AuxInt 26651 if x1.Aux != s { 26652 break 26653 } 26654 _ = x1.Args[2] 26655 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26656 break 26657 } 26658 b = mergePoint(b, x0, x1) 26659 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 26660 v.reset(OpCopy) 26661 v.AddArg(v0) 26662 v0.AuxInt = i0 26663 v0.Aux = s 26664 v0.AddArg(p) 26665 v0.AddArg(idx) 26666 v0.AddArg(mem) 26667 return true 26668 } 26669 // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 26670 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26671 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 26672 for { 26673 _ = v.Args[1] 26674 x0 := v.Args[0] 26675 if x0.Op != OpAMD64MOVBloadidx1 { 26676 break 26677 } 26678 i0 := x0.AuxInt 26679 s := x0.Aux 26680 mem := x0.Args[2] 26681 idx := x0.Args[0] 26682 p := x0.Args[1] 26683 sh := v.Args[1] 26684 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 26685 break 26686 } 26687 x1 := sh.Args[0] 26688 if x1.Op != OpAMD64MOVBloadidx1 { 26689 break 26690 } 26691 i1 := x1.AuxInt 26692 if x1.Aux != s { 26693 break 26694 } 26695 _ = x1.Args[2] 26696 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26697 break 26698 } 26699 b = mergePoint(b, x0, x1) 26700 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 26701 v.reset(OpCopy) 26702 v.AddArg(v0) 26703 v0.AuxInt = i0 26704 v0.Aux = s 26705 v0.AddArg(p) 26706 v0.AddArg(idx) 26707 v0.AddArg(mem) 26708 return true 26709 } 26710 // match: 
(ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 26711 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26712 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 26713 for { 26714 _ = v.Args[1] 26715 x0 := v.Args[0] 26716 if x0.Op != OpAMD64MOVBloadidx1 { 26717 break 26718 } 26719 i0 := x0.AuxInt 26720 s := x0.Aux 26721 mem := x0.Args[2] 26722 p := x0.Args[0] 26723 idx := x0.Args[1] 26724 sh := v.Args[1] 26725 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 26726 break 26727 } 26728 x1 := sh.Args[0] 26729 if x1.Op != OpAMD64MOVBloadidx1 { 26730 break 26731 } 26732 i1 := x1.AuxInt 26733 if x1.Aux != s { 26734 break 26735 } 26736 _ = x1.Args[2] 26737 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26738 break 26739 } 26740 b = mergePoint(b, x0, x1) 26741 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 26742 v.reset(OpCopy) 26743 v.AddArg(v0) 26744 v0.AuxInt = i0 26745 v0.Aux = s 26746 v0.AddArg(p) 26747 v0.AddArg(idx) 26748 v0.AddArg(mem) 26749 return true 26750 } 26751 // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 26752 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26753 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 26754 for { 26755 _ = v.Args[1] 26756 x0 := v.Args[0] 26757 if x0.Op != OpAMD64MOVBloadidx1 { 26758 break 26759 } 26760 i0 := x0.AuxInt 26761 s := x0.Aux 26762 mem := x0.Args[2] 26763 idx := x0.Args[0] 26764 p := x0.Args[1] 26765 sh := v.Args[1] 26766 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 26767 break 26768 } 26769 x1 := sh.Args[0] 26770 if x1.Op != OpAMD64MOVBloadidx1 { 26771 break 26772 } 26773 i1 := x1.AuxInt 26774 if x1.Aux != s { 26775 break 26776 } 26777 _ = x1.Args[2] 26778 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26779 break 26780 } 26781 b = mergePoint(b, x0, x1) 26782 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 26783 v.reset(OpCopy) 26784 v.AddArg(v0) 26785 v0.AuxInt = i0 26786 v0.Aux = s 26787 v0.AddArg(p) 26788 v0.AddArg(idx) 26789 v0.AddArg(mem) 26790 return true 26791 } 26792 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 26793 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26794 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 26795 for { 26796 _ = v.Args[1] 26797 sh := v.Args[0] 26798 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 26799 break 26800 } 26801 x1 := sh.Args[0] 26802 if x1.Op != OpAMD64MOVBloadidx1 { 26803 break 26804 } 26805 i1 := x1.AuxInt 26806 s := x1.Aux 26807 mem := x1.Args[2] 26808 p := x1.Args[0] 26809 idx := x1.Args[1] 26810 x0 := v.Args[1] 26811 if x0.Op != OpAMD64MOVBloadidx1 { 26812 break 26813 } 26814 i0 := x0.AuxInt 26815 if x0.Aux != s { 26816 break 26817 } 26818 _ = x0.Args[2] 26819 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && 
x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26820 break 26821 } 26822 b = mergePoint(b, x0, x1) 26823 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 26824 v.reset(OpCopy) 26825 v.AddArg(v0) 26826 v0.AuxInt = i0 26827 v0.Aux = s 26828 v0.AddArg(p) 26829 v0.AddArg(idx) 26830 v0.AddArg(mem) 26831 return true 26832 } 26833 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 26834 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26835 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 26836 for { 26837 _ = v.Args[1] 26838 sh := v.Args[0] 26839 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 26840 break 26841 } 26842 x1 := sh.Args[0] 26843 if x1.Op != OpAMD64MOVBloadidx1 { 26844 break 26845 } 26846 i1 := x1.AuxInt 26847 s := x1.Aux 26848 mem := x1.Args[2] 26849 idx := x1.Args[0] 26850 p := x1.Args[1] 26851 x0 := v.Args[1] 26852 if x0.Op != OpAMD64MOVBloadidx1 { 26853 break 26854 } 26855 i0 := x0.AuxInt 26856 if x0.Aux != s { 26857 break 26858 } 26859 _ = x0.Args[2] 26860 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26861 break 26862 } 26863 b = mergePoint(b, x0, x1) 26864 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 26865 v.reset(OpCopy) 26866 v.AddArg(v0) 26867 v0.AuxInt = i0 26868 v0.Aux = s 26869 v0.AddArg(p) 26870 v0.AddArg(idx) 26871 v0.AddArg(mem) 26872 return true 26873 } 26874 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 26875 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26876 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 26877 for { 26878 _ = v.Args[1] 26879 sh := v.Args[0] 26880 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 26881 break 26882 } 26883 x1 := sh.Args[0] 26884 if x1.Op != OpAMD64MOVBloadidx1 { 26885 break 26886 } 26887 i1 := x1.AuxInt 26888 s := x1.Aux 26889 mem := x1.Args[2] 26890 p := x1.Args[0] 26891 idx := x1.Args[1] 26892 x0 := v.Args[1] 26893 if x0.Op != OpAMD64MOVBloadidx1 { 26894 break 26895 } 26896 i0 := x0.AuxInt 26897 if x0.Aux != s { 26898 break 26899 } 26900 _ = x0.Args[2] 26901 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26902 break 26903 } 26904 b = mergePoint(b, x0, x1) 26905 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 26906 v.reset(OpCopy) 26907 v.AddArg(v0) 26908 v0.AuxInt = i0 26909 v0.Aux = s 26910 v0.AddArg(p) 26911 v0.AddArg(idx) 26912 v0.AddArg(mem) 26913 return true 26914 } 26915 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 26916 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26917 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 26918 for { 26919 _ = v.Args[1] 26920 sh := v.Args[0] 26921 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 26922 break 26923 } 26924 x1 := sh.Args[0] 26925 
if x1.Op != OpAMD64MOVBloadidx1 { 26926 break 26927 } 26928 i1 := x1.AuxInt 26929 s := x1.Aux 26930 mem := x1.Args[2] 26931 idx := x1.Args[0] 26932 p := x1.Args[1] 26933 x0 := v.Args[1] 26934 if x0.Op != OpAMD64MOVBloadidx1 { 26935 break 26936 } 26937 i0 := x0.AuxInt 26938 if x0.Aux != s { 26939 break 26940 } 26941 _ = x0.Args[2] 26942 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26943 break 26944 } 26945 b = mergePoint(b, x0, x1) 26946 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 26947 v.reset(OpCopy) 26948 v.AddArg(v0) 26949 v0.AuxInt = i0 26950 v0.Aux = s 26951 v0.AddArg(p) 26952 v0.AddArg(idx) 26953 v0.AddArg(mem) 26954 return true 26955 } 26956 // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26957 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26958 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26959 for { 26960 _ = v.Args[1] 26961 x0 := v.Args[0] 26962 if x0.Op != OpAMD64MOVWloadidx1 { 26963 break 26964 } 26965 i0 := x0.AuxInt 26966 s := x0.Aux 26967 mem := x0.Args[2] 26968 p := x0.Args[0] 26969 idx := x0.Args[1] 26970 sh := v.Args[1] 26971 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 26972 break 26973 } 26974 x1 := sh.Args[0] 26975 if x1.Op != OpAMD64MOVWloadidx1 { 26976 break 26977 } 26978 i1 := x1.AuxInt 26979 if x1.Aux != s { 26980 break 26981 } 26982 _ = x1.Args[2] 26983 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26984 break 26985 } 26986 b = mergePoint(b, x0, x1) 26987 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26988 v.reset(OpCopy) 26989 v.AddArg(v0) 26990 v0.AuxInt = i0 26991 v0.Aux = s 26992 v0.AddArg(p) 26993 v0.AddArg(idx) 26994 v0.AddArg(mem) 26995 return true 26996 } 26997 return false 26998 } 26999 func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool { 27000 b := v.Block 27001 typ := &b.Func.Config.Types 27002 // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 27003 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27004 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 27005 for { 27006 _ = v.Args[1] 27007 x0 := v.Args[0] 27008 if x0.Op != OpAMD64MOVWloadidx1 { 27009 break 27010 } 27011 i0 := x0.AuxInt 27012 s := x0.Aux 27013 mem := x0.Args[2] 27014 idx := x0.Args[0] 27015 p := x0.Args[1] 27016 sh := v.Args[1] 27017 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 27018 break 27019 } 27020 x1 := sh.Args[0] 27021 if x1.Op != OpAMD64MOVWloadidx1 { 27022 break 27023 } 27024 i1 := x1.AuxInt 27025 if x1.Aux != s { 27026 break 27027 } 27028 _ = x1.Args[2] 27029 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27030 break 27031 } 27032 b = mergePoint(b, x0, x1) 27033 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27034 v.reset(OpCopy) 27035 v.AddArg(v0) 27036 v0.AuxInt = i0 27037 v0.Aux = s 27038 v0.AddArg(p) 27039 
v0.AddArg(idx) 27040 v0.AddArg(mem) 27041 return true 27042 } 27043 // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27044 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27045 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 27046 for { 27047 _ = v.Args[1] 27048 x0 := v.Args[0] 27049 if x0.Op != OpAMD64MOVWloadidx1 { 27050 break 27051 } 27052 i0 := x0.AuxInt 27053 s := x0.Aux 27054 mem := x0.Args[2] 27055 p := x0.Args[0] 27056 idx := x0.Args[1] 27057 sh := v.Args[1] 27058 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 27059 break 27060 } 27061 x1 := sh.Args[0] 27062 if x1.Op != OpAMD64MOVWloadidx1 { 27063 break 27064 } 27065 i1 := x1.AuxInt 27066 if x1.Aux != s { 27067 break 27068 } 27069 _ = x1.Args[2] 27070 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27071 break 27072 } 27073 b = mergePoint(b, x0, x1) 27074 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27075 v.reset(OpCopy) 27076 v.AddArg(v0) 27077 v0.AuxInt = i0 27078 v0.Aux = s 27079 v0.AddArg(p) 27080 v0.AddArg(idx) 27081 v0.AddArg(mem) 27082 return true 27083 } 27084 // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27085 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27086 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 27087 for { 27088 _ = v.Args[1] 27089 x0 := v.Args[0] 27090 if x0.Op != OpAMD64MOVWloadidx1 { 27091 break 27092 } 27093 i0 := x0.AuxInt 27094 s := x0.Aux 27095 mem := x0.Args[2] 27096 idx := x0.Args[0] 27097 p := x0.Args[1] 27098 sh := v.Args[1] 27099 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 27100 break 27101 } 27102 x1 := sh.Args[0] 27103 if x1.Op != OpAMD64MOVWloadidx1 { 27104 break 27105 } 27106 i1 := x1.AuxInt 27107 if x1.Aux != s { 27108 break 27109 } 27110 _ = x1.Args[2] 27111 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27112 break 27113 } 27114 b = mergePoint(b, x0, x1) 27115 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27116 v.reset(OpCopy) 27117 v.AddArg(v0) 27118 v0.AuxInt = i0 27119 v0.Aux = s 27120 v0.AddArg(p) 27121 v0.AddArg(idx) 27122 v0.AddArg(mem) 27123 return true 27124 } 27125 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 27126 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27127 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 27128 for { 27129 _ = v.Args[1] 27130 sh := v.Args[0] 27131 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 27132 break 27133 } 27134 x1 := sh.Args[0] 27135 if x1.Op != OpAMD64MOVWloadidx1 { 27136 break 27137 } 27138 i1 := x1.AuxInt 27139 s := x1.Aux 27140 mem := x1.Args[2] 27141 p := x1.Args[0] 27142 idx := x1.Args[1] 27143 x0 := v.Args[1] 27144 if x0.Op != OpAMD64MOVWloadidx1 { 27145 break 27146 } 27147 i0 := x0.AuxInt 27148 if x0.Aux != s { 27149 break 27150 } 27151 _ = x0.Args[2] 27152 if p != 
x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27153 break 27154 } 27155 b = mergePoint(b, x0, x1) 27156 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27157 v.reset(OpCopy) 27158 v.AddArg(v0) 27159 v0.AuxInt = i0 27160 v0.Aux = s 27161 v0.AddArg(p) 27162 v0.AddArg(idx) 27163 v0.AddArg(mem) 27164 return true 27165 } 27166 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 27167 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27168 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 27169 for { 27170 _ = v.Args[1] 27171 sh := v.Args[0] 27172 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 27173 break 27174 } 27175 x1 := sh.Args[0] 27176 if x1.Op != OpAMD64MOVWloadidx1 { 27177 break 27178 } 27179 i1 := x1.AuxInt 27180 s := x1.Aux 27181 mem := x1.Args[2] 27182 idx := x1.Args[0] 27183 p := x1.Args[1] 27184 x0 := v.Args[1] 27185 if x0.Op != OpAMD64MOVWloadidx1 { 27186 break 27187 } 27188 i0 := x0.AuxInt 27189 if x0.Aux != s { 27190 break 27191 } 27192 _ = x0.Args[2] 27193 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27194 break 27195 } 27196 b = mergePoint(b, x0, x1) 27197 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27198 v.reset(OpCopy) 27199 v.AddArg(v0) 27200 v0.AuxInt = i0 27201 v0.Aux = s 27202 v0.AddArg(p) 27203 v0.AddArg(idx) 27204 v0.AddArg(mem) 27205 return true 27206 } 27207 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 27208 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27209 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 27210 for { 27211 _ = v.Args[1] 27212 sh := v.Args[0] 27213 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 27214 break 27215 } 27216 x1 := sh.Args[0] 27217 if x1.Op != OpAMD64MOVWloadidx1 { 27218 break 27219 } 27220 i1 := x1.AuxInt 27221 s := x1.Aux 27222 mem := x1.Args[2] 27223 p := x1.Args[0] 27224 idx := x1.Args[1] 27225 x0 := v.Args[1] 27226 if x0.Op != OpAMD64MOVWloadidx1 { 27227 break 27228 } 27229 i0 := x0.AuxInt 27230 if x0.Aux != s { 27231 break 27232 } 27233 _ = x0.Args[2] 27234 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27235 break 27236 } 27237 b = mergePoint(b, x0, x1) 27238 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27239 v.reset(OpCopy) 27240 v.AddArg(v0) 27241 v0.AuxInt = i0 27242 v0.Aux = s 27243 v0.AddArg(p) 27244 v0.AddArg(idx) 27245 v0.AddArg(mem) 27246 return true 27247 } 27248 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 27249 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27250 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 27251 for { 27252 _ = v.Args[1] 27253 sh := v.Args[0] 27254 if sh.Op != OpAMD64SHLLconst || 
sh.AuxInt != 16 { 27255 break 27256 } 27257 x1 := sh.Args[0] 27258 if x1.Op != OpAMD64MOVWloadidx1 { 27259 break 27260 } 27261 i1 := x1.AuxInt 27262 s := x1.Aux 27263 mem := x1.Args[2] 27264 idx := x1.Args[0] 27265 p := x1.Args[1] 27266 x0 := v.Args[1] 27267 if x0.Op != OpAMD64MOVWloadidx1 { 27268 break 27269 } 27270 i0 := x0.AuxInt 27271 if x0.Aux != s { 27272 break 27273 } 27274 _ = x0.Args[2] 27275 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27276 break 27277 } 27278 b = mergePoint(b, x0, x1) 27279 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27280 v.reset(OpCopy) 27281 v.AddArg(v0) 27282 v0.AuxInt = i0 27283 v0.Aux = s 27284 v0.AddArg(p) 27285 v0.AddArg(idx) 27286 v0.AddArg(mem) 27287 return true 27288 } 27289 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 27290 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27291 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27292 for { 27293 _ = v.Args[1] 27294 s1 := v.Args[0] 27295 if s1.Op != OpAMD64SHLLconst { 27296 break 27297 } 27298 j1 := s1.AuxInt 27299 x1 := s1.Args[0] 27300 if x1.Op != OpAMD64MOVBloadidx1 { 27301 break 27302 } 27303 i1 := x1.AuxInt 27304 s := x1.Aux 27305 mem := x1.Args[2] 27306 p := x1.Args[0] 27307 idx := x1.Args[1] 27308 or := v.Args[1] 27309 if or.Op != OpAMD64ORL { 27310 break 27311 } 27312 y := or.Args[1] 27313 s0 := or.Args[0] 27314 if s0.Op != OpAMD64SHLLconst { 27315 break 27316 } 27317 j0 := s0.AuxInt 27318 x0 := s0.Args[0] 27319 if x0.Op != OpAMD64MOVBloadidx1 { 27320 break 27321 } 27322 i0 := x0.AuxInt 27323 if x0.Aux != s { 27324 break 27325 } 27326 _ = x0.Args[2] 27327 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27328 break 27329 } 27330 b = mergePoint(b, x0, x1, y) 27331 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27332 v.reset(OpCopy) 27333 v.AddArg(v0) 27334 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27335 v1.AuxInt = j0 27336 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27337 v2.AuxInt = i0 27338 v2.Aux = s 27339 v2.AddArg(p) 27340 v2.AddArg(idx) 27341 v2.AddArg(mem) 27342 v1.AddArg(v2) 27343 v0.AddArg(v1) 27344 v0.AddArg(y) 27345 return true 27346 } 27347 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 27348 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27349 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27350 for { 27351 _ = v.Args[1] 27352 s1 := v.Args[0] 27353 if s1.Op != OpAMD64SHLLconst { 27354 break 27355 } 27356 j1 := s1.AuxInt 27357 x1 := s1.Args[0] 27358 if x1.Op != 
OpAMD64MOVBloadidx1 { 27359 break 27360 } 27361 i1 := x1.AuxInt 27362 s := x1.Aux 27363 mem := x1.Args[2] 27364 idx := x1.Args[0] 27365 p := x1.Args[1] 27366 or := v.Args[1] 27367 if or.Op != OpAMD64ORL { 27368 break 27369 } 27370 y := or.Args[1] 27371 s0 := or.Args[0] 27372 if s0.Op != OpAMD64SHLLconst { 27373 break 27374 } 27375 j0 := s0.AuxInt 27376 x0 := s0.Args[0] 27377 if x0.Op != OpAMD64MOVBloadidx1 { 27378 break 27379 } 27380 i0 := x0.AuxInt 27381 if x0.Aux != s { 27382 break 27383 } 27384 _ = x0.Args[2] 27385 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27386 break 27387 } 27388 b = mergePoint(b, x0, x1, y) 27389 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27390 v.reset(OpCopy) 27391 v.AddArg(v0) 27392 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27393 v1.AuxInt = j0 27394 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27395 v2.AuxInt = i0 27396 v2.Aux = s 27397 v2.AddArg(p) 27398 v2.AddArg(idx) 27399 v2.AddArg(mem) 27400 v1.AddArg(v2) 27401 v0.AddArg(v1) 27402 v0.AddArg(y) 27403 return true 27404 } 27405 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 27406 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27407 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27408 for { 27409 _ = v.Args[1] 27410 s1 := v.Args[0] 27411 if s1.Op != OpAMD64SHLLconst { 27412 break 27413 } 27414 j1 := s1.AuxInt 27415 x1 := s1.Args[0] 27416 if x1.Op != OpAMD64MOVBloadidx1 { 27417 break 27418 } 27419 i1 := x1.AuxInt 27420 s := x1.Aux 27421 mem := x1.Args[2] 27422 p := x1.Args[0] 27423 idx := x1.Args[1] 27424 or := v.Args[1] 27425 if or.Op != OpAMD64ORL { 27426 break 27427 } 27428 y := or.Args[1] 27429 s0 := or.Args[0] 27430 if s0.Op != OpAMD64SHLLconst { 27431 break 27432 } 27433 j0 := s0.AuxInt 27434 x0 := s0.Args[0] 27435 if x0.Op != OpAMD64MOVBloadidx1 { 27436 break 27437 } 27438 i0 := x0.AuxInt 27439 if x0.Aux != s { 27440 break 27441 } 27442 _ = x0.Args[2] 27443 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27444 break 27445 } 27446 b = mergePoint(b, x0, x1, y) 27447 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27448 v.reset(OpCopy) 27449 v.AddArg(v0) 27450 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27451 v1.AuxInt = j0 27452 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27453 v2.AuxInt = i0 27454 v2.Aux = s 27455 v2.AddArg(p) 27456 v2.AddArg(idx) 27457 v2.AddArg(mem) 27458 v1.AddArg(v2) 27459 v0.AddArg(v1) 27460 v0.AddArg(y) 27461 return true 27462 } 27463 return false 27464 } 27465 func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool { 27466 b := v.Block 27467 typ := &b.Func.Config.Types 27468 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) 
y)) 27469 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27470 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27471 for { 27472 _ = v.Args[1] 27473 s1 := v.Args[0] 27474 if s1.Op != OpAMD64SHLLconst { 27475 break 27476 } 27477 j1 := s1.AuxInt 27478 x1 := s1.Args[0] 27479 if x1.Op != OpAMD64MOVBloadidx1 { 27480 break 27481 } 27482 i1 := x1.AuxInt 27483 s := x1.Aux 27484 mem := x1.Args[2] 27485 idx := x1.Args[0] 27486 p := x1.Args[1] 27487 or := v.Args[1] 27488 if or.Op != OpAMD64ORL { 27489 break 27490 } 27491 y := or.Args[1] 27492 s0 := or.Args[0] 27493 if s0.Op != OpAMD64SHLLconst { 27494 break 27495 } 27496 j0 := s0.AuxInt 27497 x0 := s0.Args[0] 27498 if x0.Op != OpAMD64MOVBloadidx1 { 27499 break 27500 } 27501 i0 := x0.AuxInt 27502 if x0.Aux != s { 27503 break 27504 } 27505 _ = x0.Args[2] 27506 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27507 break 27508 } 27509 b = mergePoint(b, x0, x1, y) 27510 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27511 v.reset(OpCopy) 27512 v.AddArg(v0) 27513 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27514 v1.AuxInt = j0 27515 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27516 v2.AuxInt = i0 27517 v2.Aux = s 27518 v2.AddArg(p) 27519 v2.AddArg(idx) 27520 v2.AddArg(mem) 27521 v1.AddArg(v2) 27522 v0.AddArg(v1) 27523 v0.AddArg(y) 27524 return true 27525 } 27526 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 27527 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27528 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27529 for { 27530 _ = v.Args[1] 27531 s1 := v.Args[0] 27532 if s1.Op != OpAMD64SHLLconst { 27533 break 27534 } 27535 j1 := s1.AuxInt 27536 x1 := s1.Args[0] 27537 if x1.Op != OpAMD64MOVBloadidx1 { 27538 break 27539 } 27540 i1 := x1.AuxInt 27541 s := x1.Aux 27542 mem := x1.Args[2] 27543 p := x1.Args[0] 27544 idx := x1.Args[1] 27545 or := v.Args[1] 27546 if or.Op != OpAMD64ORL { 27547 break 27548 } 27549 _ = or.Args[1] 27550 y := or.Args[0] 27551 s0 := or.Args[1] 27552 if s0.Op != OpAMD64SHLLconst { 27553 break 27554 } 27555 j0 := s0.AuxInt 27556 x0 := s0.Args[0] 27557 if x0.Op != OpAMD64MOVBloadidx1 { 27558 break 27559 } 27560 i0 := x0.AuxInt 27561 if x0.Aux != s { 27562 break 27563 } 27564 _ = x0.Args[2] 27565 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27566 break 27567 } 27568 b = mergePoint(b, x0, x1, y) 27569 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27570 v.reset(OpCopy) 27571 v.AddArg(v0) 27572 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, 
v.Type) 27573 v1.AuxInt = j0 27574 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27575 v2.AuxInt = i0 27576 v2.Aux = s 27577 v2.AddArg(p) 27578 v2.AddArg(idx) 27579 v2.AddArg(mem) 27580 v1.AddArg(v2) 27581 v0.AddArg(v1) 27582 v0.AddArg(y) 27583 return true 27584 } 27585 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 27586 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27587 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27588 for { 27589 _ = v.Args[1] 27590 s1 := v.Args[0] 27591 if s1.Op != OpAMD64SHLLconst { 27592 break 27593 } 27594 j1 := s1.AuxInt 27595 x1 := s1.Args[0] 27596 if x1.Op != OpAMD64MOVBloadidx1 { 27597 break 27598 } 27599 i1 := x1.AuxInt 27600 s := x1.Aux 27601 mem := x1.Args[2] 27602 idx := x1.Args[0] 27603 p := x1.Args[1] 27604 or := v.Args[1] 27605 if or.Op != OpAMD64ORL { 27606 break 27607 } 27608 _ = or.Args[1] 27609 y := or.Args[0] 27610 s0 := or.Args[1] 27611 if s0.Op != OpAMD64SHLLconst { 27612 break 27613 } 27614 j0 := s0.AuxInt 27615 x0 := s0.Args[0] 27616 if x0.Op != OpAMD64MOVBloadidx1 { 27617 break 27618 } 27619 i0 := x0.AuxInt 27620 if x0.Aux != s { 27621 break 27622 } 27623 _ = x0.Args[2] 27624 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27625 break 27626 } 27627 b = mergePoint(b, x0, x1, y) 27628 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27629 v.reset(OpCopy) 27630 v.AddArg(v0) 27631 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27632 v1.AuxInt = j0 27633 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27634 v2.AuxInt = i0 27635 v2.Aux = s 27636 v2.AddArg(p) 27637 v2.AddArg(idx) 27638 v2.AddArg(mem) 27639 v1.AddArg(v2) 27640 v0.AddArg(v1) 27641 v0.AddArg(y) 27642 return true 27643 } 27644 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 27645 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27646 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27647 for { 27648 _ = v.Args[1] 27649 s1 := v.Args[0] 27650 if s1.Op != OpAMD64SHLLconst { 27651 break 27652 } 27653 j1 := s1.AuxInt 27654 x1 := s1.Args[0] 27655 if x1.Op != OpAMD64MOVBloadidx1 { 27656 break 27657 } 27658 i1 := x1.AuxInt 27659 s := x1.Aux 27660 mem := x1.Args[2] 27661 p := x1.Args[0] 27662 idx := x1.Args[1] 27663 or := v.Args[1] 27664 if or.Op != OpAMD64ORL { 27665 break 27666 } 27667 _ = or.Args[1] 27668 y := or.Args[0] 27669 s0 := or.Args[1] 27670 if s0.Op != OpAMD64SHLLconst { 27671 break 27672 } 27673 j0 := s0.AuxInt 27674 x0 := s0.Args[0] 27675 if x0.Op != OpAMD64MOVBloadidx1 { 27676 break 27677 } 27678 i0 := x0.AuxInt 27679 if x0.Aux != s { 27680 break 27681 } 27682 _ = x0.Args[2] 27683 if idx != x0.Args[0] || p != x0.Args[1] || mem != 
x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27684 break 27685 } 27686 b = mergePoint(b, x0, x1, y) 27687 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27688 v.reset(OpCopy) 27689 v.AddArg(v0) 27690 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27691 v1.AuxInt = j0 27692 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27693 v2.AuxInt = i0 27694 v2.Aux = s 27695 v2.AddArg(p) 27696 v2.AddArg(idx) 27697 v2.AddArg(mem) 27698 v1.AddArg(v2) 27699 v0.AddArg(v1) 27700 v0.AddArg(y) 27701 return true 27702 } 27703 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 27704 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27705 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27706 for { 27707 _ = v.Args[1] 27708 s1 := v.Args[0] 27709 if s1.Op != OpAMD64SHLLconst { 27710 break 27711 } 27712 j1 := s1.AuxInt 27713 x1 := s1.Args[0] 27714 if x1.Op != OpAMD64MOVBloadidx1 { 27715 break 27716 } 27717 i1 := x1.AuxInt 27718 s := x1.Aux 27719 mem := x1.Args[2] 27720 idx := x1.Args[0] 27721 p := x1.Args[1] 27722 or := v.Args[1] 27723 if or.Op != OpAMD64ORL { 27724 break 27725 } 27726 _ = or.Args[1] 27727 y := or.Args[0] 27728 s0 := or.Args[1] 27729 if s0.Op != OpAMD64SHLLconst { 27730 break 27731 } 27732 j0 := s0.AuxInt 27733 x0 := s0.Args[0] 27734 if x0.Op != OpAMD64MOVBloadidx1 { 27735 break 27736 } 27737 i0 := x0.AuxInt 27738 if x0.Aux != s { 27739 break 27740 } 27741 _ = x0.Args[2] 27742 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27743 break 27744 } 27745 b = mergePoint(b, x0, x1, y) 27746 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27747 v.reset(OpCopy) 27748 v.AddArg(v0) 27749 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27750 v1.AuxInt = j0 27751 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27752 v2.AuxInt = i0 27753 v2.Aux = s 27754 v2.AddArg(p) 27755 v2.AddArg(idx) 27756 v2.AddArg(mem) 27757 v1.AddArg(v2) 27758 v0.AddArg(v1) 27759 v0.AddArg(y) 27760 return true 27761 } 27762 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27763 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27764 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27765 for { 27766 _ = v.Args[1] 27767 or := v.Args[0] 27768 if or.Op != OpAMD64ORL { 27769 break 27770 } 27771 y := or.Args[1] 27772 s0 := or.Args[0] 27773 if s0.Op != OpAMD64SHLLconst { 27774 break 27775 } 27776 j0 := s0.AuxInt 27777 x0 := s0.Args[0] 27778 if x0.Op != OpAMD64MOVBloadidx1 { 27779 break 27780 } 27781 i0 := 
x0.AuxInt 27782 s := x0.Aux 27783 mem := x0.Args[2] 27784 p := x0.Args[0] 27785 idx := x0.Args[1] 27786 s1 := v.Args[1] 27787 if s1.Op != OpAMD64SHLLconst { 27788 break 27789 } 27790 j1 := s1.AuxInt 27791 x1 := s1.Args[0] 27792 if x1.Op != OpAMD64MOVBloadidx1 { 27793 break 27794 } 27795 i1 := x1.AuxInt 27796 if x1.Aux != s { 27797 break 27798 } 27799 _ = x1.Args[2] 27800 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27801 break 27802 } 27803 b = mergePoint(b, x0, x1, y) 27804 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27805 v.reset(OpCopy) 27806 v.AddArg(v0) 27807 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27808 v1.AuxInt = j0 27809 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27810 v2.AuxInt = i0 27811 v2.Aux = s 27812 v2.AddArg(p) 27813 v2.AddArg(idx) 27814 v2.AddArg(mem) 27815 v1.AddArg(v2) 27816 v0.AddArg(v1) 27817 v0.AddArg(y) 27818 return true 27819 } 27820 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27821 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27822 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27823 for { 27824 _ = v.Args[1] 27825 or := v.Args[0] 27826 if or.Op != OpAMD64ORL { 27827 break 27828 } 27829 y := or.Args[1] 27830 s0 := or.Args[0] 27831 if s0.Op != OpAMD64SHLLconst { 27832 break 27833 } 27834 j0 := s0.AuxInt 27835 x0 := s0.Args[0] 27836 if x0.Op != OpAMD64MOVBloadidx1 { 27837 break 27838 } 27839 i0 := x0.AuxInt 27840 s := x0.Aux 27841 mem := x0.Args[2] 27842 idx := x0.Args[0] 27843 p := x0.Args[1] 27844 s1 := v.Args[1] 27845 if s1.Op != OpAMD64SHLLconst { 27846 break 27847 } 27848 j1 := s1.AuxInt 27849 x1 := s1.Args[0] 27850 if x1.Op != OpAMD64MOVBloadidx1 { 27851 break 27852 } 27853 i1 := x1.AuxInt 27854 if x1.Aux != s { 27855 break 27856 } 27857 _ = x1.Args[2] 27858 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27859 break 27860 } 27861 b = mergePoint(b, x0, x1, y) 27862 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27863 v.reset(OpCopy) 27864 v.AddArg(v0) 27865 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27866 v1.AuxInt = j0 27867 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27868 v2.AuxInt = i0 27869 v2.Aux = s 27870 v2.AddArg(p) 27871 v2.AddArg(idx) 27872 v2.AddArg(mem) 27873 v1.AddArg(v2) 27874 v0.AddArg(v1) 27875 v0.AddArg(y) 27876 return true 27877 } 27878 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27879 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27880 // result: @mergePoint(b,x0,x1,y) 
(ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27881 for { 27882 _ = v.Args[1] 27883 or := v.Args[0] 27884 if or.Op != OpAMD64ORL { 27885 break 27886 } 27887 _ = or.Args[1] 27888 y := or.Args[0] 27889 s0 := or.Args[1] 27890 if s0.Op != OpAMD64SHLLconst { 27891 break 27892 } 27893 j0 := s0.AuxInt 27894 x0 := s0.Args[0] 27895 if x0.Op != OpAMD64MOVBloadidx1 { 27896 break 27897 } 27898 i0 := x0.AuxInt 27899 s := x0.Aux 27900 mem := x0.Args[2] 27901 p := x0.Args[0] 27902 idx := x0.Args[1] 27903 s1 := v.Args[1] 27904 if s1.Op != OpAMD64SHLLconst { 27905 break 27906 } 27907 j1 := s1.AuxInt 27908 x1 := s1.Args[0] 27909 if x1.Op != OpAMD64MOVBloadidx1 { 27910 break 27911 } 27912 i1 := x1.AuxInt 27913 if x1.Aux != s { 27914 break 27915 } 27916 _ = x1.Args[2] 27917 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27918 break 27919 } 27920 b = mergePoint(b, x0, x1, y) 27921 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27922 v.reset(OpCopy) 27923 v.AddArg(v0) 27924 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27925 v1.AuxInt = j0 27926 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27927 v2.AuxInt = i0 27928 v2.Aux = s 27929 v2.AddArg(p) 27930 v2.AddArg(idx) 27931 v2.AddArg(mem) 27932 v1.AddArg(v2) 27933 v0.AddArg(v1) 27934 v0.AddArg(y) 27935 return true 27936 } 27937 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27938 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27939 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27940 for { 27941 _ = v.Args[1] 27942 or := v.Args[0] 27943 if or.Op != OpAMD64ORL { 27944 break 27945 } 27946 _ = or.Args[1] 27947 y := or.Args[0] 27948 s0 := or.Args[1] 27949 if s0.Op != OpAMD64SHLLconst { 27950 break 27951 } 27952 j0 := s0.AuxInt 27953 x0 := s0.Args[0] 27954 if x0.Op != OpAMD64MOVBloadidx1 { 27955 break 27956 } 27957 i0 := x0.AuxInt 27958 s := x0.Aux 27959 mem := x0.Args[2] 27960 idx := x0.Args[0] 27961 p := x0.Args[1] 27962 s1 := v.Args[1] 27963 if s1.Op != OpAMD64SHLLconst { 27964 break 27965 } 27966 j1 := s1.AuxInt 27967 x1 := s1.Args[0] 27968 if x1.Op != OpAMD64MOVBloadidx1 { 27969 break 27970 } 27971 i1 := x1.AuxInt 27972 if x1.Aux != s { 27973 break 27974 } 27975 _ = x1.Args[2] 27976 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27977 break 27978 } 27979 b = mergePoint(b, x0, x1, y) 27980 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 27981 v.reset(OpCopy) 27982 v.AddArg(v0) 27983 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 27984 v1.AuxInt = j0 27985 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27986 v2.AuxInt = i0 27987 v2.Aux = s 27988 v2.AddArg(p) 27989 v2.AddArg(idx) 27990 v2.AddArg(mem) 27991 v1.AddArg(v2) 27992 v0.AddArg(v1) 27993 v0.AddArg(y) 27994 return 
true 27995 } 27996 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27997 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27998 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27999 for { 28000 _ = v.Args[1] 28001 or := v.Args[0] 28002 if or.Op != OpAMD64ORL { 28003 break 28004 } 28005 y := or.Args[1] 28006 s0 := or.Args[0] 28007 if s0.Op != OpAMD64SHLLconst { 28008 break 28009 } 28010 j0 := s0.AuxInt 28011 x0 := s0.Args[0] 28012 if x0.Op != OpAMD64MOVBloadidx1 { 28013 break 28014 } 28015 i0 := x0.AuxInt 28016 s := x0.Aux 28017 mem := x0.Args[2] 28018 p := x0.Args[0] 28019 idx := x0.Args[1] 28020 s1 := v.Args[1] 28021 if s1.Op != OpAMD64SHLLconst { 28022 break 28023 } 28024 j1 := s1.AuxInt 28025 x1 := s1.Args[0] 28026 if x1.Op != OpAMD64MOVBloadidx1 { 28027 break 28028 } 28029 i1 := x1.AuxInt 28030 if x1.Aux != s { 28031 break 28032 } 28033 _ = x1.Args[2] 28034 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28035 break 28036 } 28037 b = mergePoint(b, x0, x1, y) 28038 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 28039 v.reset(OpCopy) 28040 v.AddArg(v0) 28041 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 28042 v1.AuxInt = j0 28043 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28044 v2.AuxInt = i0 28045 v2.Aux = s 28046 v2.AddArg(p) 28047 v2.AddArg(idx) 28048 v2.AddArg(mem) 28049 v1.AddArg(v2) 28050 v0.AddArg(v1) 28051 v0.AddArg(y) 28052 return true 28053 } 28054 return false 28055 } 28056 func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { 28057 b := v.Block 28058 typ := &b.Func.Config.Types 28059 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 28060 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28061 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 28062 for { 28063 _ = v.Args[1] 28064 or := v.Args[0] 28065 if or.Op != OpAMD64ORL { 28066 break 28067 } 28068 y := or.Args[1] 28069 s0 := or.Args[0] 28070 if s0.Op != OpAMD64SHLLconst { 28071 break 28072 } 28073 j0 := s0.AuxInt 28074 x0 := s0.Args[0] 28075 if x0.Op != OpAMD64MOVBloadidx1 { 28076 break 28077 } 28078 i0 := x0.AuxInt 28079 s := x0.Aux 28080 mem := x0.Args[2] 28081 idx := x0.Args[0] 28082 p := x0.Args[1] 28083 s1 := v.Args[1] 28084 if s1.Op != OpAMD64SHLLconst { 28085 break 28086 } 28087 j1 := s1.AuxInt 28088 x1 := s1.Args[0] 28089 if x1.Op != OpAMD64MOVBloadidx1 { 28090 break 28091 } 28092 i1 := x1.AuxInt 28093 if x1.Aux != s { 28094 break 28095 } 28096 _ = x1.Args[2] 28097 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil 
&& clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28098 break 28099 } 28100 b = mergePoint(b, x0, x1, y) 28101 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 28102 v.reset(OpCopy) 28103 v.AddArg(v0) 28104 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 28105 v1.AuxInt = j0 28106 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28107 v2.AuxInt = i0 28108 v2.Aux = s 28109 v2.AddArg(p) 28110 v2.AddArg(idx) 28111 v2.AddArg(mem) 28112 v1.AddArg(v2) 28113 v0.AddArg(v1) 28114 v0.AddArg(y) 28115 return true 28116 } 28117 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 28118 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28119 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 28120 for { 28121 _ = v.Args[1] 28122 or := v.Args[0] 28123 if or.Op != OpAMD64ORL { 28124 break 28125 } 28126 _ = or.Args[1] 28127 y := or.Args[0] 28128 s0 := or.Args[1] 28129 if s0.Op != OpAMD64SHLLconst { 28130 break 28131 } 28132 j0 := s0.AuxInt 28133 x0 := s0.Args[0] 28134 if x0.Op != OpAMD64MOVBloadidx1 { 28135 break 28136 } 28137 i0 := x0.AuxInt 28138 s := x0.Aux 28139 mem := x0.Args[2] 28140 p := x0.Args[0] 28141 idx := x0.Args[1] 28142 s1 := v.Args[1] 28143 if s1.Op != OpAMD64SHLLconst { 28144 break 28145 } 28146 j1 := s1.AuxInt 28147 x1 := s1.Args[0] 28148 if x1.Op != OpAMD64MOVBloadidx1 { 28149 break 28150 } 28151 i1 := x1.AuxInt 28152 if x1.Aux != s { 28153 break 28154 } 28155 _ = x1.Args[2] 28156 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28157 break 28158 } 28159 b = mergePoint(b, x0, x1, y) 28160 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 28161 v.reset(OpCopy) 28162 v.AddArg(v0) 28163 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 28164 v1.AuxInt = j0 28165 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28166 v2.AuxInt = i0 28167 v2.Aux = s 28168 v2.AddArg(p) 28169 v2.AddArg(idx) 28170 v2.AddArg(mem) 28171 v1.AddArg(v2) 28172 v0.AddArg(v1) 28173 v0.AddArg(y) 28174 return true 28175 } 28176 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 28177 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28178 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 28179 for { 28180 _ = v.Args[1] 28181 or := v.Args[0] 28182 if or.Op != OpAMD64ORL { 28183 break 28184 } 28185 _ = or.Args[1] 28186 y := or.Args[0] 28187 s0 := or.Args[1] 28188 if s0.Op != OpAMD64SHLLconst { 28189 break 28190 } 28191 j0 := s0.AuxInt 28192 x0 := s0.Args[0] 28193 if x0.Op != OpAMD64MOVBloadidx1 { 28194 break 28195 } 28196 i0 := x0.AuxInt 28197 s := x0.Aux 28198 mem := x0.Args[2] 28199 idx := x0.Args[0] 28200 p := x0.Args[1] 28201 s1 := v.Args[1] 28202 if s1.Op != OpAMD64SHLLconst 
{ 28203 break 28204 } 28205 j1 := s1.AuxInt 28206 x1 := s1.Args[0] 28207 if x1.Op != OpAMD64MOVBloadidx1 { 28208 break 28209 } 28210 i1 := x1.AuxInt 28211 if x1.Aux != s { 28212 break 28213 } 28214 _ = x1.Args[2] 28215 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28216 break 28217 } 28218 b = mergePoint(b, x0, x1, y) 28219 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 28220 v.reset(OpCopy) 28221 v.AddArg(v0) 28222 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 28223 v1.AuxInt = j0 28224 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28225 v2.AuxInt = i0 28226 v2.Aux = s 28227 v2.AddArg(p) 28228 v2.AddArg(idx) 28229 v2.AddArg(mem) 28230 v1.AddArg(v2) 28231 v0.AddArg(v1) 28232 v0.AddArg(y) 28233 return true 28234 } 28235 // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) 28236 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28237 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 28238 for { 28239 _ = v.Args[1] 28240 x1 := v.Args[0] 28241 if x1.Op != OpAMD64MOVBload { 28242 break 28243 } 28244 i1 := x1.AuxInt 28245 s := x1.Aux 28246 mem := x1.Args[1] 28247 p := x1.Args[0] 28248 sh := v.Args[1] 28249 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28250 break 28251 } 28252 x0 := sh.Args[0] 28253 if x0.Op != OpAMD64MOVBload { 28254 break 28255 } 28256 i0 := x0.AuxInt 28257 if x0.Aux != s { 28258 break 28259 } 28260 _ = x0.Args[1] 28261 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28262 break 28263 } 28264 b = mergePoint(b, x0, x1) 28265 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) 28266 v.reset(OpCopy) 28267 v.AddArg(v0) 28268 v0.AuxInt = 8 28269 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 28270 v1.AuxInt = i0 28271 v1.Aux = s 28272 v1.AddArg(p) 28273 v1.AddArg(mem) 28274 v0.AddArg(v1) 28275 return true 28276 } 28277 // match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 28278 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28279 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 28280 for { 28281 _ = v.Args[1] 28282 sh := v.Args[0] 28283 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28284 break 28285 } 28286 x0 := sh.Args[0] 28287 if x0.Op != OpAMD64MOVBload { 28288 break 28289 } 28290 i0 := x0.AuxInt 28291 s := x0.Aux 28292 mem := x0.Args[1] 28293 p := x0.Args[0] 28294 x1 := v.Args[1] 28295 if x1.Op != OpAMD64MOVBload { 28296 break 28297 } 28298 i1 := x1.AuxInt 28299 if x1.Aux != s { 28300 break 28301 } 28302 _ = x1.Args[1] 28303 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28304 break 28305 } 28306 b = mergePoint(b, x0, x1) 28307 v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type) 28308 v.reset(OpCopy) 28309 v.AddArg(v0) 28310 v0.AuxInt = 8 28311 v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 28312 
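// NOTE (illustrative, not generated): this rule and its mirror above fuse a
// big-endian 2-byte load, b[i0]<<8 | b[i0+1], into one MOVWload plus
// ROLWconst [8]; rotating a uint16 by 8 swaps its two bytes, i.e.
// bits.RotateLeft16(x, 8). The earlier j1 == j0+8 variants need no rotate
// because their byte order already matches amd64's little-endian layout.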
v1.AuxInt = i0 28313 v1.Aux = s 28314 v1.AddArg(p) 28315 v1.AddArg(mem) 28316 v0.AddArg(v1) 28317 return true 28318 } 28319 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 28320 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28321 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 28322 for { 28323 _ = v.Args[1] 28324 r1 := v.Args[0] 28325 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 28326 break 28327 } 28328 x1 := r1.Args[0] 28329 if x1.Op != OpAMD64MOVWload { 28330 break 28331 } 28332 i1 := x1.AuxInt 28333 s := x1.Aux 28334 mem := x1.Args[1] 28335 p := x1.Args[0] 28336 sh := v.Args[1] 28337 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 28338 break 28339 } 28340 r0 := sh.Args[0] 28341 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 28342 break 28343 } 28344 x0 := r0.Args[0] 28345 if x0.Op != OpAMD64MOVWload { 28346 break 28347 } 28348 i0 := x0.AuxInt 28349 if x0.Aux != s { 28350 break 28351 } 28352 _ = x0.Args[1] 28353 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28354 break 28355 } 28356 b = mergePoint(b, x0, x1) 28357 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) 28358 v.reset(OpCopy) 28359 v.AddArg(v0) 28360 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 28361 v1.AuxInt = i0 28362 v1.Aux = s 28363 v1.AddArg(p) 28364 v1.AddArg(mem) 28365 v0.AddArg(v1) 28366 return true 28367 } 28368 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 28369 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28370 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 28371 for { 28372 _ = v.Args[1] 28373 sh := v.Args[0] 28374 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 28375 break 28376 } 28377 r0 := sh.Args[0] 28378 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 28379 break 28380 } 28381 x0 := r0.Args[0] 28382 if x0.Op != OpAMD64MOVWload { 28383 break 28384 } 28385 i0 := x0.AuxInt 28386 s := x0.Aux 28387 mem := x0.Args[1] 28388 p := x0.Args[0] 28389 r1 := v.Args[1] 28390 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 28391 break 28392 } 28393 x1 := r1.Args[0] 28394 if x1.Op != OpAMD64MOVWload { 28395 break 28396 } 28397 i1 := x1.AuxInt 28398 if x1.Aux != s { 28399 break 28400 } 28401 _ = x1.Args[1] 28402 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28403 break 28404 } 28405 b = mergePoint(b, x0, x1) 28406 v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type) 28407 v.reset(OpCopy) 28408 v.AddArg(v0) 28409 v1 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 28410 v1.AuxInt = i0 28411 v1.Aux = s 28412 v1.AddArg(p) 28413 v1.AddArg(mem) 28414 v0.AddArg(v1) 28415 return true 28416 } 28417 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst 
[j1] x1:(MOVBload [i1] {s} p mem)) y)) 28418 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28419 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 28420 for { 28421 _ = v.Args[1] 28422 s0 := v.Args[0] 28423 if s0.Op != OpAMD64SHLLconst { 28424 break 28425 } 28426 j0 := s0.AuxInt 28427 x0 := s0.Args[0] 28428 if x0.Op != OpAMD64MOVBload { 28429 break 28430 } 28431 i0 := x0.AuxInt 28432 s := x0.Aux 28433 mem := x0.Args[1] 28434 p := x0.Args[0] 28435 or := v.Args[1] 28436 if or.Op != OpAMD64ORL { 28437 break 28438 } 28439 y := or.Args[1] 28440 s1 := or.Args[0] 28441 if s1.Op != OpAMD64SHLLconst { 28442 break 28443 } 28444 j1 := s1.AuxInt 28445 x1 := s1.Args[0] 28446 if x1.Op != OpAMD64MOVBload { 28447 break 28448 } 28449 i1 := x1.AuxInt 28450 if x1.Aux != s { 28451 break 28452 } 28453 _ = x1.Args[1] 28454 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28455 break 28456 } 28457 b = mergePoint(b, x0, x1, y) 28458 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) 28459 v.reset(OpCopy) 28460 v.AddArg(v0) 28461 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) 28462 v1.AuxInt = j1 28463 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) 28464 v2.AuxInt = 8 28465 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 28466 v3.AuxInt = i0 28467 v3.Aux = s 28468 v3.AddArg(p) 28469 v3.AddArg(mem) 28470 v2.AddArg(v3) 28471 v1.AddArg(v2) 28472 v0.AddArg(v1) 28473 v0.AddArg(y) 28474 return true 28475 } 28476 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))) 28477 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28478 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 28479 for { 28480 _ = v.Args[1] 28481 s0 := v.Args[0] 28482 if s0.Op != OpAMD64SHLLconst { 28483 break 28484 } 28485 j0 := s0.AuxInt 28486 x0 := s0.Args[0] 28487 if x0.Op != OpAMD64MOVBload { 28488 break 28489 } 28490 i0 := x0.AuxInt 28491 s := x0.Aux 28492 mem := x0.Args[1] 28493 p := x0.Args[0] 28494 or := v.Args[1] 28495 if or.Op != OpAMD64ORL { 28496 break 28497 } 28498 _ = or.Args[1] 28499 y := or.Args[0] 28500 s1 := or.Args[1] 28501 if s1.Op != OpAMD64SHLLconst { 28502 break 28503 } 28504 j1 := s1.AuxInt 28505 x1 := s1.Args[0] 28506 if x1.Op != OpAMD64MOVBload { 28507 break 28508 } 28509 i1 := x1.AuxInt 28510 if x1.Aux != s { 28511 break 28512 } 28513 _ = x1.Args[1] 28514 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28515 break 28516 } 28517 b = mergePoint(b, x0, x1, y) 28518 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) 28519 v.reset(OpCopy) 28520 v.AddArg(v0) 28521 v1 := 
b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) 28522 v1.AuxInt = j1 28523 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) 28524 v2.AuxInt = 8 28525 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 28526 v3.AuxInt = i0 28527 v3.Aux = s 28528 v3.AddArg(p) 28529 v3.AddArg(mem) 28530 v2.AddArg(v3) 28531 v1.AddArg(v2) 28532 v0.AddArg(v1) 28533 v0.AddArg(y) 28534 return true 28535 } 28536 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 28537 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28538 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 28539 for { 28540 _ = v.Args[1] 28541 or := v.Args[0] 28542 if or.Op != OpAMD64ORL { 28543 break 28544 } 28545 y := or.Args[1] 28546 s1 := or.Args[0] 28547 if s1.Op != OpAMD64SHLLconst { 28548 break 28549 } 28550 j1 := s1.AuxInt 28551 x1 := s1.Args[0] 28552 if x1.Op != OpAMD64MOVBload { 28553 break 28554 } 28555 i1 := x1.AuxInt 28556 s := x1.Aux 28557 mem := x1.Args[1] 28558 p := x1.Args[0] 28559 s0 := v.Args[1] 28560 if s0.Op != OpAMD64SHLLconst { 28561 break 28562 } 28563 j0 := s0.AuxInt 28564 x0 := s0.Args[0] 28565 if x0.Op != OpAMD64MOVBload { 28566 break 28567 } 28568 i0 := x0.AuxInt 28569 if x0.Aux != s { 28570 break 28571 } 28572 _ = x0.Args[1] 28573 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28574 break 28575 } 28576 b = mergePoint(b, x0, x1, y) 28577 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) 28578 v.reset(OpCopy) 28579 v.AddArg(v0) 28580 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) 28581 v1.AuxInt = j1 28582 v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) 28583 v2.AuxInt = 8 28584 v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 28585 v3.AuxInt = i0 28586 v3.Aux = s 28587 v3.AddArg(p) 28588 v3.AddArg(mem) 28589 v2.AddArg(v3) 28590 v1.AddArg(v2) 28591 v0.AddArg(v1) 28592 v0.AddArg(y) 28593 return true 28594 } 28595 return false 28596 } 28597 func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool { 28598 b := v.Block 28599 typ := &b.Func.Config.Types 28600 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 28601 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28602 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 28603 for { 28604 _ = v.Args[1] 28605 or := v.Args[0] 28606 if or.Op != OpAMD64ORL { 28607 break 28608 } 28609 _ = or.Args[1] 28610 y := or.Args[0] 28611 s1 := or.Args[1] 28612 if s1.Op != OpAMD64SHLLconst { 28613 break 28614 } 28615 j1 := s1.AuxInt 28616 x1 := s1.Args[0] 28617 if x1.Op != OpAMD64MOVBload { 28618 break 28619 } 28620 i1 := x1.AuxInt 28621 s := x1.Aux 28622 mem := x1.Args[1] 28623 p := x1.Args[0] 28624 s0 := v.Args[1] 28625 if s0.Op != OpAMD64SHLLconst { 28626 break 28627 } 28628 
j0 := s0.AuxInt 28629 x0 := s0.Args[0] 28630 if x0.Op != OpAMD64MOVBload { 28631 break 28632 } 28633 i0 := x0.AuxInt 28634 if x0.Aux != s { 28635 break 28636 } 28637 _ = x0.Args[1] 28638 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28639 break 28640 } 28641 b = mergePoint(b, x0, x1, y) 28642 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) 28643 v.reset(OpCopy) 28644 v.AddArg(v0) 28645 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) 28646 v1.AuxInt = j1 28647 v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) 28648 v2.AuxInt = 8 28649 v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 28650 v3.AuxInt = i0 28651 v3.Aux = s 28652 v3.AddArg(p) 28653 v3.AddArg(mem) 28654 v2.AddArg(v3) 28655 v1.AddArg(v2) 28656 v0.AddArg(v1) 28657 v0.AddArg(y) 28658 return true 28659 } 28660 // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 28661 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28662 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28663 for { 28664 _ = v.Args[1] 28665 x1 := v.Args[0] 28666 if x1.Op != OpAMD64MOVBloadidx1 { 28667 break 28668 } 28669 i1 := x1.AuxInt 28670 s := x1.Aux 28671 mem := x1.Args[2] 28672 p := x1.Args[0] 28673 idx := x1.Args[1] 28674 sh := v.Args[1] 28675 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28676 break 28677 } 28678 x0 := sh.Args[0] 28679 if x0.Op != OpAMD64MOVBloadidx1 { 28680 break 28681 } 28682 i0 := x0.AuxInt 28683 if x0.Aux != s { 28684 break 28685 } 28686 _ = x0.Args[2] 28687 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28688 break 28689 } 28690 b = mergePoint(b, x0, x1) 28691 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28692 v.reset(OpCopy) 28693 v.AddArg(v0) 28694 v0.AuxInt = 8 28695 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28696 v1.AuxInt = i0 28697 v1.Aux = s 28698 v1.AddArg(p) 28699 v1.AddArg(idx) 28700 v1.AddArg(mem) 28701 v0.AddArg(v1) 28702 return true 28703 } 28704 // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 28705 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28706 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28707 for { 28708 _ = v.Args[1] 28709 x1 := v.Args[0] 28710 if x1.Op != OpAMD64MOVBloadidx1 { 28711 break 28712 } 28713 i1 := x1.AuxInt 28714 s := x1.Aux 28715 mem := x1.Args[2] 28716 idx := x1.Args[0] 28717 p := x1.Args[1] 28718 sh := v.Args[1] 28719 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28720 break 28721 } 28722 x0 := sh.Args[0] 28723 if x0.Op != OpAMD64MOVBloadidx1 { 28724 break 28725 } 28726 i0 := x0.AuxInt 28727 if x0.Aux != s { 28728 break 28729 } 28730 _ = x0.Args[2] 28731 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28732 break 28733 } 28734 b = 
mergePoint(b, x0, x1) 28735 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28736 v.reset(OpCopy) 28737 v.AddArg(v0) 28738 v0.AuxInt = 8 28739 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28740 v1.AuxInt = i0 28741 v1.Aux = s 28742 v1.AddArg(p) 28743 v1.AddArg(idx) 28744 v1.AddArg(mem) 28745 v0.AddArg(v1) 28746 return true 28747 } 28748 // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 28749 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28750 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28751 for { 28752 _ = v.Args[1] 28753 x1 := v.Args[0] 28754 if x1.Op != OpAMD64MOVBloadidx1 { 28755 break 28756 } 28757 i1 := x1.AuxInt 28758 s := x1.Aux 28759 mem := x1.Args[2] 28760 p := x1.Args[0] 28761 idx := x1.Args[1] 28762 sh := v.Args[1] 28763 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28764 break 28765 } 28766 x0 := sh.Args[0] 28767 if x0.Op != OpAMD64MOVBloadidx1 { 28768 break 28769 } 28770 i0 := x0.AuxInt 28771 if x0.Aux != s { 28772 break 28773 } 28774 _ = x0.Args[2] 28775 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28776 break 28777 } 28778 b = mergePoint(b, x0, x1) 28779 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28780 v.reset(OpCopy) 28781 v.AddArg(v0) 28782 v0.AuxInt = 8 28783 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28784 v1.AuxInt = i0 28785 v1.Aux = s 28786 v1.AddArg(p) 28787 v1.AddArg(idx) 28788 v1.AddArg(mem) 28789 v0.AddArg(v1) 28790 return true 28791 } 28792 // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 28793 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28794 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28795 for { 28796 _ = v.Args[1] 28797 x1 := v.Args[0] 28798 if x1.Op != OpAMD64MOVBloadidx1 { 28799 break 28800 } 28801 i1 := x1.AuxInt 28802 s := x1.Aux 28803 mem := x1.Args[2] 28804 idx := x1.Args[0] 28805 p := x1.Args[1] 28806 sh := v.Args[1] 28807 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28808 break 28809 } 28810 x0 := sh.Args[0] 28811 if x0.Op != OpAMD64MOVBloadidx1 { 28812 break 28813 } 28814 i0 := x0.AuxInt 28815 if x0.Aux != s { 28816 break 28817 } 28818 _ = x0.Args[2] 28819 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28820 break 28821 } 28822 b = mergePoint(b, x0, x1) 28823 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28824 v.reset(OpCopy) 28825 v.AddArg(v0) 28826 v0.AuxInt = 8 28827 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28828 v1.AuxInt = i0 28829 v1.Aux = s 28830 v1.AddArg(p) 28831 v1.AddArg(idx) 28832 v1.AddArg(mem) 28833 v0.AddArg(v1) 28834 return true 28835 } 28836 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 28837 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28838 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> 
[8] (MOVWloadidx1 [i0] {s} p idx mem)) 28839 for { 28840 _ = v.Args[1] 28841 sh := v.Args[0] 28842 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28843 break 28844 } 28845 x0 := sh.Args[0] 28846 if x0.Op != OpAMD64MOVBloadidx1 { 28847 break 28848 } 28849 i0 := x0.AuxInt 28850 s := x0.Aux 28851 mem := x0.Args[2] 28852 p := x0.Args[0] 28853 idx := x0.Args[1] 28854 x1 := v.Args[1] 28855 if x1.Op != OpAMD64MOVBloadidx1 { 28856 break 28857 } 28858 i1 := x1.AuxInt 28859 if x1.Aux != s { 28860 break 28861 } 28862 _ = x1.Args[2] 28863 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28864 break 28865 } 28866 b = mergePoint(b, x0, x1) 28867 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28868 v.reset(OpCopy) 28869 v.AddArg(v0) 28870 v0.AuxInt = 8 28871 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28872 v1.AuxInt = i0 28873 v1.Aux = s 28874 v1.AddArg(p) 28875 v1.AddArg(idx) 28876 v1.AddArg(mem) 28877 v0.AddArg(v1) 28878 return true 28879 } 28880 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 28881 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28882 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28883 for { 28884 _ = v.Args[1] 28885 sh := v.Args[0] 28886 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28887 break 28888 } 28889 x0 := sh.Args[0] 28890 if x0.Op != OpAMD64MOVBloadidx1 { 28891 break 28892 } 28893 i0 := x0.AuxInt 28894 s := x0.Aux 28895 mem := x0.Args[2] 28896 idx := x0.Args[0] 28897 p := x0.Args[1] 28898 x1 := v.Args[1] 28899 if x1.Op != OpAMD64MOVBloadidx1 { 28900 break 28901 } 28902 i1 := x1.AuxInt 28903 if x1.Aux != s { 28904 break 28905 } 28906 _ = x1.Args[2] 28907 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28908 break 28909 } 28910 b = mergePoint(b, x0, x1) 28911 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28912 v.reset(OpCopy) 28913 v.AddArg(v0) 28914 v0.AuxInt = 8 28915 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28916 v1.AuxInt = i0 28917 v1.Aux = s 28918 v1.AddArg(p) 28919 v1.AddArg(idx) 28920 v1.AddArg(mem) 28921 v0.AddArg(v1) 28922 return true 28923 } 28924 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 28925 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28926 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28927 for { 28928 _ = v.Args[1] 28929 sh := v.Args[0] 28930 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28931 break 28932 } 28933 x0 := sh.Args[0] 28934 if x0.Op != OpAMD64MOVBloadidx1 { 28935 break 28936 } 28937 i0 := x0.AuxInt 28938 s := x0.Aux 28939 mem := x0.Args[2] 28940 p := x0.Args[0] 28941 idx := x0.Args[1] 28942 x1 := v.Args[1] 28943 if x1.Op != OpAMD64MOVBloadidx1 { 28944 break 28945 } 28946 i1 := x1.AuxInt 28947 if x1.Aux != s { 28948 break 28949 } 28950 _ = x1.Args[2] 28951 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && 
mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28952 break 28953 } 28954 b = mergePoint(b, x0, x1) 28955 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28956 v.reset(OpCopy) 28957 v.AddArg(v0) 28958 v0.AuxInt = 8 28959 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28960 v1.AuxInt = i0 28961 v1.Aux = s 28962 v1.AddArg(p) 28963 v1.AddArg(idx) 28964 v1.AddArg(mem) 28965 v0.AddArg(v1) 28966 return true 28967 } 28968 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 28969 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28970 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28971 for { 28972 _ = v.Args[1] 28973 sh := v.Args[0] 28974 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { 28975 break 28976 } 28977 x0 := sh.Args[0] 28978 if x0.Op != OpAMD64MOVBloadidx1 { 28979 break 28980 } 28981 i0 := x0.AuxInt 28982 s := x0.Aux 28983 mem := x0.Args[2] 28984 idx := x0.Args[0] 28985 p := x0.Args[1] 28986 x1 := v.Args[1] 28987 if x1.Op != OpAMD64MOVBloadidx1 { 28988 break 28989 } 28990 i1 := x1.AuxInt 28991 if x1.Aux != s { 28992 break 28993 } 28994 _ = x1.Args[2] 28995 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28996 break 28997 } 28998 b = mergePoint(b, x0, x1) 28999 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 29000 v.reset(OpCopy) 29001 v.AddArg(v0) 29002 v0.AuxInt = 8 29003 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29004 v1.AuxInt = i0 29005 v1.Aux = s 29006 v1.AddArg(p) 29007 v1.AddArg(idx) 29008 v1.AddArg(mem) 29009 v0.AddArg(v1) 29010 return true 29011 } 29012 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 29013 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29014 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29015 for { 29016 _ = v.Args[1] 29017 r1 := v.Args[0] 29018 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 29019 break 29020 } 29021 x1 := r1.Args[0] 29022 if x1.Op != OpAMD64MOVWloadidx1 { 29023 break 29024 } 29025 i1 := x1.AuxInt 29026 s := x1.Aux 29027 mem := x1.Args[2] 29028 p := x1.Args[0] 29029 idx := x1.Args[1] 29030 sh := v.Args[1] 29031 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 29032 break 29033 } 29034 r0 := sh.Args[0] 29035 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 29036 break 29037 } 29038 x0 := r0.Args[0] 29039 if x0.Op != OpAMD64MOVWloadidx1 { 29040 break 29041 } 29042 i0 := x0.AuxInt 29043 if x0.Aux != s { 29044 break 29045 } 29046 _ = x0.Args[2] 29047 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29048 break 29049 } 29050 b = mergePoint(b, x0, x1) 29051 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29052 v.reset(OpCopy) 29053 v.AddArg(v0) 29054 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29055 v1.AuxInt = i0 29056 
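// NOTE (illustrative, not generated): BSWAPL reverses the four bytes of a
// 32-bit value, so the two byte-swapped word loads matched above,
// bswap16(w(i0))<<16 | bswap16(w(i0+2)), are together equivalent to
// bswap32(load32(i0)): a single MOVLloadidx1 feeding one BSWAPL.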
v1.Aux = s 29057 v1.AddArg(p) 29058 v1.AddArg(idx) 29059 v1.AddArg(mem) 29060 v0.AddArg(v1) 29061 return true 29062 } 29063 return false 29064 } 29065 func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool { 29066 b := v.Block 29067 typ := &b.Func.Config.Types 29068 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 29069 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29070 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29071 for { 29072 _ = v.Args[1] 29073 r1 := v.Args[0] 29074 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 29075 break 29076 } 29077 x1 := r1.Args[0] 29078 if x1.Op != OpAMD64MOVWloadidx1 { 29079 break 29080 } 29081 i1 := x1.AuxInt 29082 s := x1.Aux 29083 mem := x1.Args[2] 29084 idx := x1.Args[0] 29085 p := x1.Args[1] 29086 sh := v.Args[1] 29087 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 29088 break 29089 } 29090 r0 := sh.Args[0] 29091 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 29092 break 29093 } 29094 x0 := r0.Args[0] 29095 if x0.Op != OpAMD64MOVWloadidx1 { 29096 break 29097 } 29098 i0 := x0.AuxInt 29099 if x0.Aux != s { 29100 break 29101 } 29102 _ = x0.Args[2] 29103 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29104 break 29105 } 29106 b = mergePoint(b, x0, x1) 29107 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29108 v.reset(OpCopy) 29109 v.AddArg(v0) 29110 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29111 v1.AuxInt = i0 29112 v1.Aux = s 29113 v1.AddArg(p) 29114 v1.AddArg(idx) 29115 v1.AddArg(mem) 29116 v0.AddArg(v1) 29117 return true 29118 } 29119 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 29120 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29121 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29122 for { 29123 _ = v.Args[1] 29124 r1 := v.Args[0] 29125 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 29126 break 29127 } 29128 x1 := r1.Args[0] 29129 if x1.Op != OpAMD64MOVWloadidx1 { 29130 break 29131 } 29132 i1 := x1.AuxInt 29133 s := x1.Aux 29134 mem := x1.Args[2] 29135 p := x1.Args[0] 29136 idx := x1.Args[1] 29137 sh := v.Args[1] 29138 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 29139 break 29140 } 29141 r0 := sh.Args[0] 29142 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 29143 break 29144 } 29145 x0 := r0.Args[0] 29146 if x0.Op != OpAMD64MOVWloadidx1 { 29147 break 29148 } 29149 i0 := x0.AuxInt 29150 if x0.Aux != s { 29151 break 29152 } 29153 _ = x0.Args[2] 29154 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29155 break 29156 } 29157 b = mergePoint(b, x0, x1) 29158 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 
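// NOTE (illustrative, not generated): mergePoint picks a block where both
// original loads are available, so the fused load can be materialized
// there; the clobber calls in the condition invalidate the matched
// intermediate values once the rule fires, so they do not survive the
// rewrite.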
29159 v.reset(OpCopy) 29160 v.AddArg(v0) 29161 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29162 v1.AuxInt = i0 29163 v1.Aux = s 29164 v1.AddArg(p) 29165 v1.AddArg(idx) 29166 v1.AddArg(mem) 29167 v0.AddArg(v1) 29168 return true 29169 } 29170 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 29171 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29172 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29173 for { 29174 _ = v.Args[1] 29175 r1 := v.Args[0] 29176 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 29177 break 29178 } 29179 x1 := r1.Args[0] 29180 if x1.Op != OpAMD64MOVWloadidx1 { 29181 break 29182 } 29183 i1 := x1.AuxInt 29184 s := x1.Aux 29185 mem := x1.Args[2] 29186 idx := x1.Args[0] 29187 p := x1.Args[1] 29188 sh := v.Args[1] 29189 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 29190 break 29191 } 29192 r0 := sh.Args[0] 29193 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 29194 break 29195 } 29196 x0 := r0.Args[0] 29197 if x0.Op != OpAMD64MOVWloadidx1 { 29198 break 29199 } 29200 i0 := x0.AuxInt 29201 if x0.Aux != s { 29202 break 29203 } 29204 _ = x0.Args[2] 29205 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29206 break 29207 } 29208 b = mergePoint(b, x0, x1) 29209 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29210 v.reset(OpCopy) 29211 v.AddArg(v0) 29212 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29213 v1.AuxInt = i0 29214 v1.Aux = s 29215 v1.AddArg(p) 29216 v1.AddArg(idx) 29217 v1.AddArg(mem) 29218 v0.AddArg(v1) 29219 return true 29220 } 29221 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 29222 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29223 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29224 for { 29225 _ = v.Args[1] 29226 sh := v.Args[0] 29227 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 29228 break 29229 } 29230 r0 := sh.Args[0] 29231 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 29232 break 29233 } 29234 x0 := r0.Args[0] 29235 if x0.Op != OpAMD64MOVWloadidx1 { 29236 break 29237 } 29238 i0 := x0.AuxInt 29239 s := x0.Aux 29240 mem := x0.Args[2] 29241 p := x0.Args[0] 29242 idx := x0.Args[1] 29243 r1 := v.Args[1] 29244 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 29245 break 29246 } 29247 x1 := r1.Args[0] 29248 if x1.Op != OpAMD64MOVWloadidx1 { 29249 break 29250 } 29251 i1 := x1.AuxInt 29252 if x1.Aux != s { 29253 break 29254 } 29255 _ = x1.Args[2] 29256 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29257 break 29258 } 29259 b = mergePoint(b, x0, x1) 29260 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29261 
v.reset(OpCopy) 29262 v.AddArg(v0) 29263 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29264 v1.AuxInt = i0 29265 v1.Aux = s 29266 v1.AddArg(p) 29267 v1.AddArg(idx) 29268 v1.AddArg(mem) 29269 v0.AddArg(v1) 29270 return true 29271 } 29272 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 29273 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29274 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29275 for { 29276 _ = v.Args[1] 29277 sh := v.Args[0] 29278 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 29279 break 29280 } 29281 r0 := sh.Args[0] 29282 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 29283 break 29284 } 29285 x0 := r0.Args[0] 29286 if x0.Op != OpAMD64MOVWloadidx1 { 29287 break 29288 } 29289 i0 := x0.AuxInt 29290 s := x0.Aux 29291 mem := x0.Args[2] 29292 idx := x0.Args[0] 29293 p := x0.Args[1] 29294 r1 := v.Args[1] 29295 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 29296 break 29297 } 29298 x1 := r1.Args[0] 29299 if x1.Op != OpAMD64MOVWloadidx1 { 29300 break 29301 } 29302 i1 := x1.AuxInt 29303 if x1.Aux != s { 29304 break 29305 } 29306 _ = x1.Args[2] 29307 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29308 break 29309 } 29310 b = mergePoint(b, x0, x1) 29311 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29312 v.reset(OpCopy) 29313 v.AddArg(v0) 29314 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29315 v1.AuxInt = i0 29316 v1.Aux = s 29317 v1.AddArg(p) 29318 v1.AddArg(idx) 29319 v1.AddArg(mem) 29320 v0.AddArg(v1) 29321 return true 29322 } 29323 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 29324 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29325 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29326 for { 29327 _ = v.Args[1] 29328 sh := v.Args[0] 29329 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 29330 break 29331 } 29332 r0 := sh.Args[0] 29333 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 29334 break 29335 } 29336 x0 := r0.Args[0] 29337 if x0.Op != OpAMD64MOVWloadidx1 { 29338 break 29339 } 29340 i0 := x0.AuxInt 29341 s := x0.Aux 29342 mem := x0.Args[2] 29343 p := x0.Args[0] 29344 idx := x0.Args[1] 29345 r1 := v.Args[1] 29346 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 29347 break 29348 } 29349 x1 := r1.Args[0] 29350 if x1.Op != OpAMD64MOVWloadidx1 { 29351 break 29352 } 29353 i1 := x1.AuxInt 29354 if x1.Aux != s { 29355 break 29356 } 29357 _ = x1.Args[2] 29358 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29359 break 29360 } 29361 b = mergePoint(b, x0, x1) 29362 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29363 
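// NOTE (illustrative, not generated): the address p+idx computed by the
// *loadidx1 ops is commutative in p and idx, so the rule generator emits a
// separate copy of this rule for every p/idx argument ordering; the bodies
// differ only in which argument is treated as the pointer.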
v.reset(OpCopy) 29364 v.AddArg(v0) 29365 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29366 v1.AuxInt = i0 29367 v1.Aux = s 29368 v1.AddArg(p) 29369 v1.AddArg(idx) 29370 v1.AddArg(mem) 29371 v0.AddArg(v1) 29372 return true 29373 } 29374 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 29375 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29376 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29377 for { 29378 _ = v.Args[1] 29379 sh := v.Args[0] 29380 if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { 29381 break 29382 } 29383 r0 := sh.Args[0] 29384 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 29385 break 29386 } 29387 x0 := r0.Args[0] 29388 if x0.Op != OpAMD64MOVWloadidx1 { 29389 break 29390 } 29391 i0 := x0.AuxInt 29392 s := x0.Aux 29393 mem := x0.Args[2] 29394 idx := x0.Args[0] 29395 p := x0.Args[1] 29396 r1 := v.Args[1] 29397 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 29398 break 29399 } 29400 x1 := r1.Args[0] 29401 if x1.Op != OpAMD64MOVWloadidx1 { 29402 break 29403 } 29404 i1 := x1.AuxInt 29405 if x1.Aux != s { 29406 break 29407 } 29408 _ = x1.Args[2] 29409 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29410 break 29411 } 29412 b = mergePoint(b, x0, x1) 29413 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29414 v.reset(OpCopy) 29415 v.AddArg(v0) 29416 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29417 v1.AuxInt = i0 29418 v1.Aux = s 29419 v1.AddArg(p) 29420 v1.AddArg(idx) 29421 v1.AddArg(mem) 29422 v0.AddArg(v1) 29423 return true 29424 } 29425 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 29426 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29427 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29428 for { 29429 _ = v.Args[1] 29430 s0 := v.Args[0] 29431 if s0.Op != OpAMD64SHLLconst { 29432 break 29433 } 29434 j0 := s0.AuxInt 29435 x0 := s0.Args[0] 29436 if x0.Op != OpAMD64MOVBloadidx1 { 29437 break 29438 } 29439 i0 := x0.AuxInt 29440 s := x0.Aux 29441 mem := x0.Args[2] 29442 p := x0.Args[0] 29443 idx := x0.Args[1] 29444 or := v.Args[1] 29445 if or.Op != OpAMD64ORL { 29446 break 29447 } 29448 y := or.Args[1] 29449 s1 := or.Args[0] 29450 if s1.Op != OpAMD64SHLLconst { 29451 break 29452 } 29453 j1 := s1.AuxInt 29454 x1 := s1.Args[0] 29455 if x1.Op != OpAMD64MOVBloadidx1 { 29456 break 29457 } 29458 i1 := x1.AuxInt 29459 if x1.Aux != s { 29460 break 29461 } 29462 _ = x1.Args[2] 29463 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29464 
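// NOTE (illustrative, not generated): the guard just tested requires
// adjacent byte offsets (i1 == i0+1), shift counts exactly 8 apart with the
// smaller one 16-bit aligned (j1 == j0-8, j1%16 == 0), and single-use
// intermediates; only then is it safe to fuse the pair into one
// MOVWloadidx1 plus a byte swap. When any part fails, fall through to the
// next pattern.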
break 29465 } 29466 b = mergePoint(b, x0, x1, y) 29467 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 29468 v.reset(OpCopy) 29469 v.AddArg(v0) 29470 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 29471 v1.AuxInt = j1 29472 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29473 v2.AuxInt = 8 29474 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29475 v3.AuxInt = i0 29476 v3.Aux = s 29477 v3.AddArg(p) 29478 v3.AddArg(idx) 29479 v3.AddArg(mem) 29480 v2.AddArg(v3) 29481 v1.AddArg(v2) 29482 v0.AddArg(v1) 29483 v0.AddArg(y) 29484 return true 29485 } 29486 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 29487 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29488 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29489 for { 29490 _ = v.Args[1] 29491 s0 := v.Args[0] 29492 if s0.Op != OpAMD64SHLLconst { 29493 break 29494 } 29495 j0 := s0.AuxInt 29496 x0 := s0.Args[0] 29497 if x0.Op != OpAMD64MOVBloadidx1 { 29498 break 29499 } 29500 i0 := x0.AuxInt 29501 s := x0.Aux 29502 mem := x0.Args[2] 29503 idx := x0.Args[0] 29504 p := x0.Args[1] 29505 or := v.Args[1] 29506 if or.Op != OpAMD64ORL { 29507 break 29508 } 29509 y := or.Args[1] 29510 s1 := or.Args[0] 29511 if s1.Op != OpAMD64SHLLconst { 29512 break 29513 } 29514 j1 := s1.AuxInt 29515 x1 := s1.Args[0] 29516 if x1.Op != OpAMD64MOVBloadidx1 { 29517 break 29518 } 29519 i1 := x1.AuxInt 29520 if x1.Aux != s { 29521 break 29522 } 29523 _ = x1.Args[2] 29524 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29525 break 29526 } 29527 b = mergePoint(b, x0, x1, y) 29528 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 29529 v.reset(OpCopy) 29530 v.AddArg(v0) 29531 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 29532 v1.AuxInt = j1 29533 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29534 v2.AuxInt = 8 29535 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29536 v3.AuxInt = i0 29537 v3.Aux = s 29538 v3.AddArg(p) 29539 v3.AddArg(idx) 29540 v3.AddArg(mem) 29541 v2.AddArg(v3) 29542 v1.AddArg(v2) 29543 v0.AddArg(v1) 29544 v0.AddArg(y) 29545 return true 29546 } 29547 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 29548 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29549 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29550 for { 29551 _ = v.Args[1] 29552 s0 := v.Args[0] 29553 if s0.Op != OpAMD64SHLLconst { 29554 break 29555 } 29556 j0 := s0.AuxInt 29557 x0 := s0.Args[0] 29558 if x0.Op != OpAMD64MOVBloadidx1 { 29559 break 29560 } 29561 i0 := x0.AuxInt 29562 s := x0.Aux 29563 mem := x0.Args[2] 29564 p := x0.Args[0] 29565 idx := x0.Args[1] 29566 or := 
v.Args[1] 29567 if or.Op != OpAMD64ORL { 29568 break 29569 } 29570 y := or.Args[1] 29571 s1 := or.Args[0] 29572 if s1.Op != OpAMD64SHLLconst { 29573 break 29574 } 29575 j1 := s1.AuxInt 29576 x1 := s1.Args[0] 29577 if x1.Op != OpAMD64MOVBloadidx1 { 29578 break 29579 } 29580 i1 := x1.AuxInt 29581 if x1.Aux != s { 29582 break 29583 } 29584 _ = x1.Args[2] 29585 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29586 break 29587 } 29588 b = mergePoint(b, x0, x1, y) 29589 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 29590 v.reset(OpCopy) 29591 v.AddArg(v0) 29592 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 29593 v1.AuxInt = j1 29594 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29595 v2.AuxInt = 8 29596 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29597 v3.AuxInt = i0 29598 v3.Aux = s 29599 v3.AddArg(p) 29600 v3.AddArg(idx) 29601 v3.AddArg(mem) 29602 v2.AddArg(v3) 29603 v1.AddArg(v2) 29604 v0.AddArg(v1) 29605 v0.AddArg(y) 29606 return true 29607 } 29608 return false 29609 } 29610 func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool { 29611 b := v.Block 29612 typ := &b.Func.Config.Types 29613 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 29614 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29615 // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29616 for { 29617 _ = v.Args[1] 29618 s0 := v.Args[0] 29619 if s0.Op != OpAMD64SHLLconst { 29620 break 29621 } 29622 j0 := s0.AuxInt 29623 x0 := s0.Args[0] 29624 if x0.Op != OpAMD64MOVBloadidx1 { 29625 break 29626 } 29627 i0 := x0.AuxInt 29628 s := x0.Aux 29629 mem := x0.Args[2] 29630 idx := x0.Args[0] 29631 p := x0.Args[1] 29632 or := v.Args[1] 29633 if or.Op != OpAMD64ORL { 29634 break 29635 } 29636 y := or.Args[1] 29637 s1 := or.Args[0] 29638 if s1.Op != OpAMD64SHLLconst { 29639 break 29640 } 29641 j1 := s1.AuxInt 29642 x1 := s1.Args[0] 29643 if x1.Op != OpAMD64MOVBloadidx1 { 29644 break 29645 } 29646 i1 := x1.AuxInt 29647 if x1.Aux != s { 29648 break 29649 } 29650 _ = x1.Args[2] 29651 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29652 break 29653 } 29654 b = mergePoint(b, x0, x1, y) 29655 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 29656 v.reset(OpCopy) 29657 v.AddArg(v0) 29658 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 29659 v1.AuxInt = j1 29660 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29661 v2.AuxInt = 8 29662 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29663 v3.AuxInt = i0 29664 v3.Aux = s 29665 v3.AddArg(p) 29666 v3.AddArg(idx) 29667 v3.AddArg(mem) 29668 v2.AddArg(v3) 29669 v1.AddArg(v2) 29670 v0.AddArg(v1) 29671 v0.AddArg(y) 29672 return true 29673 } 29674 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx 
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
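	// Note on the cond lines throughout this family: alongside genuine
	// predicates (the index and shift arithmetic, the Uses counts, the
	// mergePoint check), they chain helpers such as clobber, which exist
	// purely for their side effect and always return true so they compose
	// with &&. A hedged sketch of the idiom; the real helper lives elsewhere
	// in this package, and the name clobberSketch is made up here:
	//
	//	func clobberSketch(v *Value) bool {
	//		v.reset(OpInvalid) // mark the matched sub-value dead
	//		return true        // always true, so it can sit inside a condition
	//	}
	//
	// The Uses == 1 checks earlier in the same condition are what make it
	// safe to invalidate those values once the rule fires.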
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
	// match: (ORLconst [c] x)
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTSLconst [log2uint32(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] (ORLconst [d] x))
	// result: (ORLconst [c | d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c | d
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] (BTSLconst [d] x))
	// result: (ORLconst [c | 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c | 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
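	// The first ORLconst rule above strength-reduces OR with a power-of-two
	// constant (restricted to >= 128, where the BTS form pays off) into a
	// single bit-set instruction. A small sketch of the arithmetic fact it
	// relies on; bits.TrailingZeros32 stands in for this package's
	// log2uint32 helper:
	//
	//	package main
	//
	//	import (
	//		"fmt"
	//		"math/bits"
	//	)
	//
	//	func main() {
	//		const c = uint32(1) << 20 // power of two and >= 128: the rule fires
	//		x := uint32(0x5)
	//		n := bits.TrailingZeros32(c) // 20, the bit BTSL would set
	//		fmt.Println(x|c == x|1<<n)   // true: ORL $c behaves as BTSL $n
	//	}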
	// match: (ORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconstmodify_0(v *Value) bool {
	// match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ORLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
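	// The two ORLload rules above fold an address computation (ADDQconst or
	// LEAQ) into the load's addressing mode: (base+off2)+off1 names the same
	// address as base+(off1+off2), so the constant can migrate into AuxInt
	// as long as the sum still fits in a signed 32-bit displacement, which
	// is what the is32Bit condition guards. A sketch of that check, under
	// the assumption it matches the helper elsewhere in this package:
	//
	//	func is32BitSketch(n int64) bool {
	//		return n == int64(int32(n)) // survives a round trip through int32
	//	}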
	// match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: ( ORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLmodify_0(v *Value) bool {
	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
	// match: (ORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTSQ x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		y := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTSQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTSQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || v_1_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTSQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTSQconst [log2(c)] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
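	// The ORQ rules above recognize bit-set idioms: OR with a shifted 1
	// becomes BTSQ (variable bit set), and OR with a large power-of-two
	// constant becomes BTSQconst. A minimal sketch of the first equivalence;
	// the &63 mirrors how the 64-bit shift instruction masks its count:
	//
	//	package main
	//
	//	import "fmt"
	//
	//	func main() {
	//		x, y := uint64(0x10), uint(130)
	//		// BTSQ sets bit y mod 64, which is what ORQ with 1<<y computes.
	//		fmt.Println(x|1<<(y&63) == x|1<<2) // true, since 130&63 == 2
	//	}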
	// match: (ORQ x (MOVQconst [c]))
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTSQconst [log2(c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
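	// The SHLQconst/SHRQconst pair above, with d == 64-c, is the classic
	// strength-reduced constant rotate, collapsed here to a single ROLQconst.
	// The identity, checked against the standard library:
	//
	//	package main
	//
	//	import (
	//		"fmt"
	//		"math/bits"
	//	)
	//
	//	func main() {
	//		x := uint64(0x0123456789abcdef)
	//		for c := uint(1); c < 64; c++ {
	//			if x<<c|x>>(64-c) != bits.RotateLeft64(x, int(c)) {
	//				fmt.Println("mismatch at", c) // never happens
	//			}
	//		}
	//		fmt.Println("constant rotate identity holds")
	//	}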
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
	// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
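	// The large ANDQ/SBBQcarrymask patterns in this function and the one
	// above are the generic lowering of a variable rotate: x<<y | x>>(64-y)
	// written so that out-of-range shift counts contribute zero (the
	// SBBQcarrymask produces an all-ones or all-zero mask from comparing the
	// masked count against 64). The whole dance collapses into one ROLQ. A
	// sketch of the source-level identity being recognized, for counts
	// reduced mod 64:
	//
	//	package main
	//
	//	import (
	//		"fmt"
	//		"math/bits"
	//	)
	//
	//	func main() {
	//		x := uint64(0xfeedface12345678)
	//		for y := uint(0); y < 64; y++ {
	//			// the &63 keeps the right shift in range when y == 0
	//			if x<<y|x>>((64-y)&63) != bits.RotateLeft64(x, int(y)) {
	//				fmt.Println("mismatch at", y) // never happens
	//			}
	//		}
	//		fmt.Println("variable rotate identity holds")
	//	}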
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst || v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst || v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst || v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst || v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst || v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst || v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst || v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst || v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ || y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst || v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst || v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || v_1_1_0_0_0_0.AuxInt != 63 || y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst || v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst || v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst || v_1_0_0_0_0_0.AuxInt != 63 || y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL || y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst || v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst || v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst || v_0_1_0_0_0_0.AuxInt != 63 || y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst || v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst || v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst || v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL || y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] || y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ x x)
	// result: x
	for {
		x := v.Args[1]
		if x != v.Args[0] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
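// The MOVBload/MOVWload/MOVLload rules above rewrite a pair of adjacent
// little-endian loads OR'd together into one load of twice the width
// (MOVWload, MOVLload, MOVQload). A sketch of the idea outside SSA form,
// using a hypothetical helper that is not part of the generated code:
func exampleMergedLoads(p []byte) (uint16, uint32) {
	// Each expression below has exactly the shape the rules match; on
	// AMD64 each can be satisfied by a single wider load from p.
	w := uint16(p[0]) | uint16(p[1])<<8
	l := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
	return w, l
}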
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
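	// Note on the @mergePoint result form used above and below: the
	// replacement value is built in the block returned by mergePoint
	// rather than in v's own block, and v is reset to an OpCopy of it.
	// That places the merged load at a point where both original loads
	// are available, so the rewrite stays valid regardless of which
	// block each partial load lived in.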
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
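// ORQ_30's SHLQconst rules handle the general case where an OR chain
// assembles a value from several shifted byte or word loads: any two
// adjacent fragments whose shift amounts differ by exactly the load
// width (with a suitably aligned base shift) collapse into one wider
// shifted load, leaving the rest of the chain (y) intact. A sketch of a
// chain that shrinks one pair at a time this way (hypothetical helper,
// shown only for illustration):
func exampleShiftedChain(p []byte, y uint64) uint64 {
	// uint64(p[0]) | uint64(p[1])<<8 merges into one 16-bit load shifted
	// by 0; repeated application folds the whole chain into one load.
	return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | y
}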
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
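	// MOVBloadidx1 takes a pointer and an index that are interchangeable,
	// so the generator emits one copy of this rule per operand order:
	// (p idx) versus (idx p) in each load, times the two orders of the
	// ORQ arguments. The match bodies differ only in which Args slot each
	// name is read from; every variant normalizes its result to
	// (MOVWloadidx1 [i0] {s} p idx mem).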
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
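// The indexed variants above perform the same byte/word merging when the
// address is computed as base+index (MOVBloadidx1/MOVWloadidx1), which is
// the shape produced by indexing a slice at a variable offset. A sketch
// of source code that benefits (hypothetical helper, illustration only):
func exampleIndexedMerge(p []byte, i int) uint32 {
	// Adjacent indexed byte loads OR'd together become one MOVWloadidx1,
	// and adjacent indexed word loads one MOVLloadidx1.
	return uint32(p[i]) | uint32(p[i+1])<<8 | uint32(p[i+2])<<16 | uint32(p[i+3])<<24
}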
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
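	// As with the byte merges, the MOVWloadidx1 pairs above and the
	// MOVLloadidx1 pairs below rely on i1 == i0+width to prove the two
	// loads are adjacent in memory; the shared {s} aux symbol and the
	// shared p/idx/mem arguments prove they read the same object, so the
	// combined load observes exactly the same bytes.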
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
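// ORQ_50 and ORQ_60 finish the ladder: two adjacent 32-bit indexed loads,
// one of them shifted left by 32, become a single MOVQloadidx1. The net
// effect is that a full little-endian 64-bit decode compiles down to one
// load (hypothetical helper, shown only for illustration):
func exampleLoad64(p []byte, i int) uint64 {
	lo := uint64(p[i]) | uint64(p[i+1])<<8 | uint64(p[i+2])<<16 | uint64(p[i+3])<<24
	hi := uint64(p[i+4]) | uint64(p[i+5])<<8 | uint64(p[i+6])<<16 | uint64(p[i+7])<<24
	return lo | hi<<32 // collapses to one MOVQloadidx1
}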
func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
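	// The shifted indexed variants below continue the same enumeration:
	// each permutation of (p idx) operand order inside s0/s1, and each
	// position of the unrelated term y inside the inner ORQ, gets its own
	// copy of the rule, all rewriting to the canonical
	// (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y).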
33624 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 33625 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 33626 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 33627 for { 33628 _ = v.Args[1] 33629 s1 := v.Args[0] 33630 if s1.Op != OpAMD64SHLQconst { 33631 break 33632 } 33633 j1 := s1.AuxInt 33634 x1 := s1.Args[0] 33635 if x1.Op != OpAMD64MOVBloadidx1 { 33636 break 33637 } 33638 i1 := x1.AuxInt 33639 s := x1.Aux 33640 mem := x1.Args[2] 33641 idx := x1.Args[0] 33642 p := x1.Args[1] 33643 or := v.Args[1] 33644 if or.Op != OpAMD64ORQ { 33645 break 33646 } 33647 y := or.Args[1] 33648 s0 := or.Args[0] 33649 if s0.Op != OpAMD64SHLQconst { 33650 break 33651 } 33652 j0 := s0.AuxInt 33653 x0 := s0.Args[0] 33654 if x0.Op != OpAMD64MOVBloadidx1 { 33655 break 33656 } 33657 i0 := x0.AuxInt 33658 if x0.Aux != s { 33659 break 33660 } 33661 _ = x0.Args[2] 33662 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 33663 break 33664 } 33665 b = mergePoint(b, x0, x1, y) 33666 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33667 v.reset(OpCopy) 33668 v.AddArg(v0) 33669 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33670 v1.AuxInt = j0 33671 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 33672 v2.AuxInt = i0 33673 v2.Aux = s 33674 v2.AddArg(p) 33675 v2.AddArg(idx) 33676 v2.AddArg(mem) 33677 v1.AddArg(v2) 33678 v0.AddArg(v1) 33679 v0.AddArg(y) 33680 return true 33681 } 33682 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 33683 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 33684 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 33685 for { 33686 _ = v.Args[1] 33687 s1 := v.Args[0] 33688 if s1.Op != OpAMD64SHLQconst { 33689 break 33690 } 33691 j1 := s1.AuxInt 33692 x1 := s1.Args[0] 33693 if x1.Op != OpAMD64MOVBloadidx1 { 33694 break 33695 } 33696 i1 := x1.AuxInt 33697 s := x1.Aux 33698 mem := x1.Args[2] 33699 p := x1.Args[0] 33700 idx := x1.Args[1] 33701 or := v.Args[1] 33702 if or.Op != OpAMD64ORQ { 33703 break 33704 } 33705 _ = or.Args[1] 33706 y := or.Args[0] 33707 s0 := or.Args[1] 33708 if s0.Op != OpAMD64SHLQconst { 33709 break 33710 } 33711 j0 := s0.AuxInt 33712 x0 := s0.Args[0] 33713 if x0.Op != OpAMD64MOVBloadidx1 { 33714 break 33715 } 33716 i0 := x0.AuxInt 33717 if x0.Aux != s { 33718 break 33719 } 33720 _ = x0.Args[2] 33721 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 33722 break 33723 } 33724 b = mergePoint(b, x0, x1, y) 
33725 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33726 v.reset(OpCopy) 33727 v.AddArg(v0) 33728 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33729 v1.AuxInt = j0 33730 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 33731 v2.AuxInt = i0 33732 v2.Aux = s 33733 v2.AddArg(p) 33734 v2.AddArg(idx) 33735 v2.AddArg(mem) 33736 v1.AddArg(v2) 33737 v0.AddArg(v1) 33738 v0.AddArg(y) 33739 return true 33740 } 33741 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 33742 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 33743 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 33744 for { 33745 _ = v.Args[1] 33746 s1 := v.Args[0] 33747 if s1.Op != OpAMD64SHLQconst { 33748 break 33749 } 33750 j1 := s1.AuxInt 33751 x1 := s1.Args[0] 33752 if x1.Op != OpAMD64MOVBloadidx1 { 33753 break 33754 } 33755 i1 := x1.AuxInt 33756 s := x1.Aux 33757 mem := x1.Args[2] 33758 idx := x1.Args[0] 33759 p := x1.Args[1] 33760 or := v.Args[1] 33761 if or.Op != OpAMD64ORQ { 33762 break 33763 } 33764 _ = or.Args[1] 33765 y := or.Args[0] 33766 s0 := or.Args[1] 33767 if s0.Op != OpAMD64SHLQconst { 33768 break 33769 } 33770 j0 := s0.AuxInt 33771 x0 := s0.Args[0] 33772 if x0.Op != OpAMD64MOVBloadidx1 { 33773 break 33774 } 33775 i0 := x0.AuxInt 33776 if x0.Aux != s { 33777 break 33778 } 33779 _ = x0.Args[2] 33780 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 33781 break 33782 } 33783 b = mergePoint(b, x0, x1, y) 33784 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33785 v.reset(OpCopy) 33786 v.AddArg(v0) 33787 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33788 v1.AuxInt = j0 33789 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 33790 v2.AuxInt = i0 33791 v2.Aux = s 33792 v2.AddArg(p) 33793 v2.AddArg(idx) 33794 v2.AddArg(mem) 33795 v1.AddArg(v2) 33796 v0.AddArg(v1) 33797 v0.AddArg(y) 33798 return true 33799 } 33800 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 33801 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 33802 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 33803 for { 33804 _ = v.Args[1] 33805 s1 := v.Args[0] 33806 if s1.Op != OpAMD64SHLQconst { 33807 break 33808 } 33809 j1 := s1.AuxInt 33810 x1 := s1.Args[0] 33811 if x1.Op != OpAMD64MOVBloadidx1 { 33812 break 33813 } 33814 i1 := x1.AuxInt 33815 s := x1.Aux 33816 mem := x1.Args[2] 33817 p := x1.Args[0] 33818 idx := x1.Args[1] 33819 or := v.Args[1] 33820 if or.Op != OpAMD64ORQ { 33821 break 33822 } 33823 _ = or.Args[1] 33824 y := or.Args[0] 33825 s0 := or.Args[1] 33826 if s0.Op != OpAMD64SHLQconst { 33827 break 33828 } 33829 j0 := s0.AuxInt 33830 x0 := s0.Args[0] 33831 if x0.Op != OpAMD64MOVBloadidx1 { 33832 break 33833 } 33834 
i0 := x0.AuxInt 33835 if x0.Aux != s { 33836 break 33837 } 33838 _ = x0.Args[2] 33839 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 33840 break 33841 } 33842 b = mergePoint(b, x0, x1, y) 33843 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33844 v.reset(OpCopy) 33845 v.AddArg(v0) 33846 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33847 v1.AuxInt = j0 33848 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 33849 v2.AuxInt = i0 33850 v2.Aux = s 33851 v2.AddArg(p) 33852 v2.AddArg(idx) 33853 v2.AddArg(mem) 33854 v1.AddArg(v2) 33855 v0.AddArg(v1) 33856 v0.AddArg(y) 33857 return true 33858 } 33859 return false 33860 } 33861 func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool { 33862 b := v.Block 33863 typ := &b.Func.Config.Types 33864 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 33865 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 33866 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 33867 for { 33868 _ = v.Args[1] 33869 s1 := v.Args[0] 33870 if s1.Op != OpAMD64SHLQconst { 33871 break 33872 } 33873 j1 := s1.AuxInt 33874 x1 := s1.Args[0] 33875 if x1.Op != OpAMD64MOVBloadidx1 { 33876 break 33877 } 33878 i1 := x1.AuxInt 33879 s := x1.Aux 33880 mem := x1.Args[2] 33881 idx := x1.Args[0] 33882 p := x1.Args[1] 33883 or := v.Args[1] 33884 if or.Op != OpAMD64ORQ { 33885 break 33886 } 33887 _ = or.Args[1] 33888 y := or.Args[0] 33889 s0 := or.Args[1] 33890 if s0.Op != OpAMD64SHLQconst { 33891 break 33892 } 33893 j0 := s0.AuxInt 33894 x0 := s0.Args[0] 33895 if x0.Op != OpAMD64MOVBloadidx1 { 33896 break 33897 } 33898 i0 := x0.AuxInt 33899 if x0.Aux != s { 33900 break 33901 } 33902 _ = x0.Args[2] 33903 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 33904 break 33905 } 33906 b = mergePoint(b, x0, x1, y) 33907 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33908 v.reset(OpCopy) 33909 v.AddArg(v0) 33910 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33911 v1.AuxInt = j0 33912 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 33913 v2.AuxInt = i0 33914 v2.Aux = s 33915 v2.AddArg(p) 33916 v2.AddArg(idx) 33917 v2.AddArg(mem) 33918 v1.AddArg(v2) 33919 v0.AddArg(v1) 33920 v0.AddArg(y) 33921 return true 33922 } 33923 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 33924 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 33925 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 33926 for { 33927 _ = v.Args[1] 33928 or := v.Args[0] 
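// Every rule in this family guards its rewrite with the same three
// kinds of side conditions. Uses == 1 on each matched intermediate
// ensures the loads and shifts have no other consumers, so replacing
// them is safe. mergePoint(b, x0, x1, y), roughly speaking, finds a
// block where a value combining x0 and x1 can be placed so that it
// still dominates the existing use; it returns nil when there is no
// such block, which fails the match. clobber marks a matched value
// dead so later dead-code elimination removes it, and always reports
// true so it can be chained inside the boolean condition. A rough
// sketch of its shape, assuming the helper defined in this package's
// rewrite.go:
//
//	func clobber(v *Value) bool {
//		v.reset(OpInvalid) // detach operands; value becomes dead
//		return true        // usable inside && chains in conditions
//	}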
33929 if or.Op != OpAMD64ORQ { 33930 break 33931 } 33932 y := or.Args[1] 33933 s0 := or.Args[0] 33934 if s0.Op != OpAMD64SHLQconst { 33935 break 33936 } 33937 j0 := s0.AuxInt 33938 x0 := s0.Args[0] 33939 if x0.Op != OpAMD64MOVBloadidx1 { 33940 break 33941 } 33942 i0 := x0.AuxInt 33943 s := x0.Aux 33944 mem := x0.Args[2] 33945 p := x0.Args[0] 33946 idx := x0.Args[1] 33947 s1 := v.Args[1] 33948 if s1.Op != OpAMD64SHLQconst { 33949 break 33950 } 33951 j1 := s1.AuxInt 33952 x1 := s1.Args[0] 33953 if x1.Op != OpAMD64MOVBloadidx1 { 33954 break 33955 } 33956 i1 := x1.AuxInt 33957 if x1.Aux != s { 33958 break 33959 } 33960 _ = x1.Args[2] 33961 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 33962 break 33963 } 33964 b = mergePoint(b, x0, x1, y) 33965 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33966 v.reset(OpCopy) 33967 v.AddArg(v0) 33968 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33969 v1.AuxInt = j0 33970 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 33971 v2.AuxInt = i0 33972 v2.Aux = s 33973 v2.AddArg(p) 33974 v2.AddArg(idx) 33975 v2.AddArg(mem) 33976 v1.AddArg(v2) 33977 v0.AddArg(v1) 33978 v0.AddArg(y) 33979 return true 33980 } 33981 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 33982 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 33983 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 33984 for { 33985 _ = v.Args[1] 33986 or := v.Args[0] 33987 if or.Op != OpAMD64ORQ { 33988 break 33989 } 33990 y := or.Args[1] 33991 s0 := or.Args[0] 33992 if s0.Op != OpAMD64SHLQconst { 33993 break 33994 } 33995 j0 := s0.AuxInt 33996 x0 := s0.Args[0] 33997 if x0.Op != OpAMD64MOVBloadidx1 { 33998 break 33999 } 34000 i0 := x0.AuxInt 34001 s := x0.Aux 34002 mem := x0.Args[2] 34003 idx := x0.Args[0] 34004 p := x0.Args[1] 34005 s1 := v.Args[1] 34006 if s1.Op != OpAMD64SHLQconst { 34007 break 34008 } 34009 j1 := s1.AuxInt 34010 x1 := s1.Args[0] 34011 if x1.Op != OpAMD64MOVBloadidx1 { 34012 break 34013 } 34014 i1 := x1.AuxInt 34015 if x1.Aux != s { 34016 break 34017 } 34018 _ = x1.Args[2] 34019 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34020 break 34021 } 34022 b = mergePoint(b, x0, x1, y) 34023 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34024 v.reset(OpCopy) 34025 v.AddArg(v0) 34026 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34027 v1.AuxInt = j0 34028 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34029 v2.AuxInt = i0 34030 v2.Aux = s 34031 v2.AddArg(p) 34032 v2.AddArg(idx) 34033 v2.AddArg(mem) 34034 v1.AddArg(v2) 34035 v0.AddArg(v1) 34036 v0.AddArg(y) 34037 return true 34038 } 34039 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 34040 // cond: 
i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34041 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 34042 for { 34043 _ = v.Args[1] 34044 or := v.Args[0] 34045 if or.Op != OpAMD64ORQ { 34046 break 34047 } 34048 _ = or.Args[1] 34049 y := or.Args[0] 34050 s0 := or.Args[1] 34051 if s0.Op != OpAMD64SHLQconst { 34052 break 34053 } 34054 j0 := s0.AuxInt 34055 x0 := s0.Args[0] 34056 if x0.Op != OpAMD64MOVBloadidx1 { 34057 break 34058 } 34059 i0 := x0.AuxInt 34060 s := x0.Aux 34061 mem := x0.Args[2] 34062 p := x0.Args[0] 34063 idx := x0.Args[1] 34064 s1 := v.Args[1] 34065 if s1.Op != OpAMD64SHLQconst { 34066 break 34067 } 34068 j1 := s1.AuxInt 34069 x1 := s1.Args[0] 34070 if x1.Op != OpAMD64MOVBloadidx1 { 34071 break 34072 } 34073 i1 := x1.AuxInt 34074 if x1.Aux != s { 34075 break 34076 } 34077 _ = x1.Args[2] 34078 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34079 break 34080 } 34081 b = mergePoint(b, x0, x1, y) 34082 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34083 v.reset(OpCopy) 34084 v.AddArg(v0) 34085 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34086 v1.AuxInt = j0 34087 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34088 v2.AuxInt = i0 34089 v2.Aux = s 34090 v2.AddArg(p) 34091 v2.AddArg(idx) 34092 v2.AddArg(mem) 34093 v1.AddArg(v2) 34094 v0.AddArg(v1) 34095 v0.AddArg(y) 34096 return true 34097 } 34098 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 34099 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34100 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 34101 for { 34102 _ = v.Args[1] 34103 or := v.Args[0] 34104 if or.Op != OpAMD64ORQ { 34105 break 34106 } 34107 _ = or.Args[1] 34108 y := or.Args[0] 34109 s0 := or.Args[1] 34110 if s0.Op != OpAMD64SHLQconst { 34111 break 34112 } 34113 j0 := s0.AuxInt 34114 x0 := s0.Args[0] 34115 if x0.Op != OpAMD64MOVBloadidx1 { 34116 break 34117 } 34118 i0 := x0.AuxInt 34119 s := x0.Aux 34120 mem := x0.Args[2] 34121 idx := x0.Args[0] 34122 p := x0.Args[1] 34123 s1 := v.Args[1] 34124 if s1.Op != OpAMD64SHLQconst { 34125 break 34126 } 34127 j1 := s1.AuxInt 34128 x1 := s1.Args[0] 34129 if x1.Op != OpAMD64MOVBloadidx1 { 34130 break 34131 } 34132 i1 := x1.AuxInt 34133 if x1.Aux != s { 34134 break 34135 } 34136 _ = x1.Args[2] 34137 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34138 break 34139 } 34140 b = mergePoint(b, x0, x1, y) 34141 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34142 v.reset(OpCopy) 34143 v.AddArg(v0) 34144 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, 
v.Type) 34145 v1.AuxInt = j0 34146 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34147 v2.AuxInt = i0 34148 v2.Aux = s 34149 v2.AddArg(p) 34150 v2.AddArg(idx) 34151 v2.AddArg(mem) 34152 v1.AddArg(v2) 34153 v0.AddArg(v1) 34154 v0.AddArg(y) 34155 return true 34156 } 34157 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 34158 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34159 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 34160 for { 34161 _ = v.Args[1] 34162 or := v.Args[0] 34163 if or.Op != OpAMD64ORQ { 34164 break 34165 } 34166 y := or.Args[1] 34167 s0 := or.Args[0] 34168 if s0.Op != OpAMD64SHLQconst { 34169 break 34170 } 34171 j0 := s0.AuxInt 34172 x0 := s0.Args[0] 34173 if x0.Op != OpAMD64MOVBloadidx1 { 34174 break 34175 } 34176 i0 := x0.AuxInt 34177 s := x0.Aux 34178 mem := x0.Args[2] 34179 p := x0.Args[0] 34180 idx := x0.Args[1] 34181 s1 := v.Args[1] 34182 if s1.Op != OpAMD64SHLQconst { 34183 break 34184 } 34185 j1 := s1.AuxInt 34186 x1 := s1.Args[0] 34187 if x1.Op != OpAMD64MOVBloadidx1 { 34188 break 34189 } 34190 i1 := x1.AuxInt 34191 if x1.Aux != s { 34192 break 34193 } 34194 _ = x1.Args[2] 34195 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34196 break 34197 } 34198 b = mergePoint(b, x0, x1, y) 34199 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34200 v.reset(OpCopy) 34201 v.AddArg(v0) 34202 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34203 v1.AuxInt = j0 34204 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34205 v2.AuxInt = i0 34206 v2.Aux = s 34207 v2.AddArg(p) 34208 v2.AddArg(idx) 34209 v2.AddArg(mem) 34210 v1.AddArg(v2) 34211 v0.AddArg(v1) 34212 v0.AddArg(y) 34213 return true 34214 } 34215 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 34216 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34217 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 34218 for { 34219 _ = v.Args[1] 34220 or := v.Args[0] 34221 if or.Op != OpAMD64ORQ { 34222 break 34223 } 34224 y := or.Args[1] 34225 s0 := or.Args[0] 34226 if s0.Op != OpAMD64SHLQconst { 34227 break 34228 } 34229 j0 := s0.AuxInt 34230 x0 := s0.Args[0] 34231 if x0.Op != OpAMD64MOVBloadidx1 { 34232 break 34233 } 34234 i0 := x0.AuxInt 34235 s := x0.Aux 34236 mem := x0.Args[2] 34237 idx := x0.Args[0] 34238 p := x0.Args[1] 34239 s1 := v.Args[1] 34240 if s1.Op != OpAMD64SHLQconst { 34241 break 34242 } 34243 j1 := s1.AuxInt 34244 x1 := s1.Args[0] 34245 if x1.Op != OpAMD64MOVBloadidx1 { 34246 break 34247 } 34248 i1 := x1.AuxInt 34249 if x1.Aux != s { 34250 break 34251 } 34252 _ = x1.Args[2] 34253 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && 
j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34254 break 34255 } 34256 b = mergePoint(b, x0, x1, y) 34257 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34258 v.reset(OpCopy) 34259 v.AddArg(v0) 34260 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34261 v1.AuxInt = j0 34262 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34263 v2.AuxInt = i0 34264 v2.Aux = s 34265 v2.AddArg(p) 34266 v2.AddArg(idx) 34267 v2.AddArg(mem) 34268 v1.AddArg(v2) 34269 v0.AddArg(v1) 34270 v0.AddArg(y) 34271 return true 34272 } 34273 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 34274 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34275 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 34276 for { 34277 _ = v.Args[1] 34278 or := v.Args[0] 34279 if or.Op != OpAMD64ORQ { 34280 break 34281 } 34282 _ = or.Args[1] 34283 y := or.Args[0] 34284 s0 := or.Args[1] 34285 if s0.Op != OpAMD64SHLQconst { 34286 break 34287 } 34288 j0 := s0.AuxInt 34289 x0 := s0.Args[0] 34290 if x0.Op != OpAMD64MOVBloadidx1 { 34291 break 34292 } 34293 i0 := x0.AuxInt 34294 s := x0.Aux 34295 mem := x0.Args[2] 34296 p := x0.Args[0] 34297 idx := x0.Args[1] 34298 s1 := v.Args[1] 34299 if s1.Op != OpAMD64SHLQconst { 34300 break 34301 } 34302 j1 := s1.AuxInt 34303 x1 := s1.Args[0] 34304 if x1.Op != OpAMD64MOVBloadidx1 { 34305 break 34306 } 34307 i1 := x1.AuxInt 34308 if x1.Aux != s { 34309 break 34310 } 34311 _ = x1.Args[2] 34312 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34313 break 34314 } 34315 b = mergePoint(b, x0, x1, y) 34316 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34317 v.reset(OpCopy) 34318 v.AddArg(v0) 34319 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34320 v1.AuxInt = j0 34321 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34322 v2.AuxInt = i0 34323 v2.Aux = s 34324 v2.AddArg(p) 34325 v2.AddArg(idx) 34326 v2.AddArg(mem) 34327 v1.AddArg(v2) 34328 v0.AddArg(v1) 34329 v0.AddArg(y) 34330 return true 34331 } 34332 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 34333 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34334 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 34335 for { 34336 _ = v.Args[1] 34337 or := v.Args[0] 34338 if or.Op != OpAMD64ORQ { 34339 break 34340 } 34341 _ = or.Args[1] 34342 y := or.Args[0] 34343 s0 := or.Args[1] 34344 if s0.Op != OpAMD64SHLQconst { 34345 break 34346 } 34347 j0 := s0.AuxInt 34348 x0 := s0.Args[0] 34349 if x0.Op != OpAMD64MOVBloadidx1 { 34350 break 34351 } 34352 i0 := x0.AuxInt 34353 s := x0.Aux 
34354 mem := x0.Args[2] 34355 idx := x0.Args[0] 34356 p := x0.Args[1] 34357 s1 := v.Args[1] 34358 if s1.Op != OpAMD64SHLQconst { 34359 break 34360 } 34361 j1 := s1.AuxInt 34362 x1 := s1.Args[0] 34363 if x1.Op != OpAMD64MOVBloadidx1 { 34364 break 34365 } 34366 i1 := x1.AuxInt 34367 if x1.Aux != s { 34368 break 34369 } 34370 _ = x1.Args[2] 34371 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34372 break 34373 } 34374 b = mergePoint(b, x0, x1, y) 34375 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34376 v.reset(OpCopy) 34377 v.AddArg(v0) 34378 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34379 v1.AuxInt = j0 34380 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 34381 v2.AuxInt = i0 34382 v2.Aux = s 34383 v2.AddArg(p) 34384 v2.AddArg(idx) 34385 v2.AddArg(mem) 34386 v1.AddArg(v2) 34387 v0.AddArg(v1) 34388 v0.AddArg(y) 34389 return true 34390 } 34391 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 34392 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34393 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34394 for { 34395 _ = v.Args[1] 34396 s1 := v.Args[0] 34397 if s1.Op != OpAMD64SHLQconst { 34398 break 34399 } 34400 j1 := s1.AuxInt 34401 x1 := s1.Args[0] 34402 if x1.Op != OpAMD64MOVWloadidx1 { 34403 break 34404 } 34405 i1 := x1.AuxInt 34406 s := x1.Aux 34407 mem := x1.Args[2] 34408 p := x1.Args[0] 34409 idx := x1.Args[1] 34410 or := v.Args[1] 34411 if or.Op != OpAMD64ORQ { 34412 break 34413 } 34414 y := or.Args[1] 34415 s0 := or.Args[0] 34416 if s0.Op != OpAMD64SHLQconst { 34417 break 34418 } 34419 j0 := s0.AuxInt 34420 x0 := s0.Args[0] 34421 if x0.Op != OpAMD64MOVWloadidx1 { 34422 break 34423 } 34424 i0 := x0.AuxInt 34425 if x0.Aux != s { 34426 break 34427 } 34428 _ = x0.Args[2] 34429 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34430 break 34431 } 34432 b = mergePoint(b, x0, x1, y) 34433 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34434 v.reset(OpCopy) 34435 v.AddArg(v0) 34436 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34437 v1.AuxInt = j0 34438 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34439 v2.AuxInt = i0 34440 v2.Aux = s 34441 v2.AddArg(p) 34442 v2.AddArg(idx) 34443 v2.AddArg(mem) 34444 v1.AddArg(v2) 34445 v0.AddArg(v1) 34446 v0.AddArg(y) 34447 return true 34448 } 34449 return false 34450 } 34451 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { 34452 b := v.Block 34453 typ := &b.Func.Config.Types 34454 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 34455 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34456 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34457 for { 34458 _ = v.Args[1] 34459 s1 := v.Args[0] 34460 if s1.Op != OpAMD64SHLQconst { 34461 break 34462 } 34463 j1 := s1.AuxInt 34464 x1 := s1.Args[0] 34465 if x1.Op != OpAMD64MOVWloadidx1 { 34466 break 34467 } 34468 i1 := x1.AuxInt 34469 s := x1.Aux 34470 mem := x1.Args[2] 34471 idx := x1.Args[0] 34472 p := x1.Args[1] 34473 or := v.Args[1] 34474 if or.Op != OpAMD64ORQ { 34475 break 34476 } 34477 y := or.Args[1] 34478 s0 := or.Args[0] 34479 if s0.Op != OpAMD64SHLQconst { 34480 break 34481 } 34482 j0 := s0.AuxInt 34483 x0 := s0.Args[0] 34484 if x0.Op != OpAMD64MOVWloadidx1 { 34485 break 34486 } 34487 i0 := x0.AuxInt 34488 if x0.Aux != s { 34489 break 34490 } 34491 _ = x0.Args[2] 34492 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34493 break 34494 } 34495 b = mergePoint(b, x0, x1, y) 34496 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34497 v.reset(OpCopy) 34498 v.AddArg(v0) 34499 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34500 v1.AuxInt = j0 34501 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34502 v2.AuxInt = i0 34503 v2.Aux = s 34504 v2.AddArg(p) 34505 v2.AddArg(idx) 34506 v2.AddArg(mem) 34507 v1.AddArg(v2) 34508 v0.AddArg(v1) 34509 v0.AddArg(y) 34510 return true 34511 } 34512 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 34513 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34514 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34515 for { 34516 _ = v.Args[1] 34517 s1 := v.Args[0] 34518 if s1.Op != OpAMD64SHLQconst { 34519 break 34520 } 34521 j1 := s1.AuxInt 34522 x1 := s1.Args[0] 34523 if x1.Op != OpAMD64MOVWloadidx1 { 34524 break 34525 } 34526 i1 := x1.AuxInt 34527 s := x1.Aux 34528 mem := x1.Args[2] 34529 p := x1.Args[0] 34530 idx := x1.Args[1] 34531 or := v.Args[1] 34532 if or.Op != OpAMD64ORQ { 34533 break 34534 } 34535 y := or.Args[1] 34536 s0 := or.Args[0] 34537 if s0.Op != OpAMD64SHLQconst { 34538 break 34539 } 34540 j0 := s0.AuxInt 34541 x0 := s0.Args[0] 34542 if x0.Op != OpAMD64MOVWloadidx1 { 34543 break 34544 } 34545 i0 := x0.AuxInt 34546 if x0.Aux != s { 34547 break 34548 } 34549 _ = x0.Args[2] 34550 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34551 break 34552 } 34553 b = mergePoint(b, x0, x1, y) 34554 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34555 v.reset(OpCopy) 34556 v.AddArg(v0) 34557 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34558 v1.AuxInt = j0 34559 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34560 v2.AuxInt = i0 34561 v2.Aux = s 34562 v2.AddArg(p) 34563 v2.AddArg(idx) 34564 v2.AddArg(mem) 
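// From here through the next several match blocks the same pattern is
// applied one size up: two adjacent two-byte indexed loads
// (MOVWloadidx1, i1 == i0+2) behind shifts that differ by 16 with a
// 32-bit-aligned lower shift (j1 == j0+16, j0 % 32 == 0) merge into a
// single four-byte MOVLloadidx1. A hypothetical Go-level shape that
// reduces to this pattern (illustrative only):
//
//	// loadLE32 combines two 16-bit halves; once the byte-pair rules
//	// above have fired twice, the halves match the MOVWloadidx1 rules.
//	func loadLE32(b []byte, i int) uint64 {
//		lo := uint64(b[i]) | uint64(b[i+1])<<8
//		hi := uint64(b[i+2]) | uint64(b[i+3])<<8
//		return lo | hi<<16
//	}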
34565 v1.AddArg(v2) 34566 v0.AddArg(v1) 34567 v0.AddArg(y) 34568 return true 34569 } 34570 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 34571 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34572 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34573 for { 34574 _ = v.Args[1] 34575 s1 := v.Args[0] 34576 if s1.Op != OpAMD64SHLQconst { 34577 break 34578 } 34579 j1 := s1.AuxInt 34580 x1 := s1.Args[0] 34581 if x1.Op != OpAMD64MOVWloadidx1 { 34582 break 34583 } 34584 i1 := x1.AuxInt 34585 s := x1.Aux 34586 mem := x1.Args[2] 34587 idx := x1.Args[0] 34588 p := x1.Args[1] 34589 or := v.Args[1] 34590 if or.Op != OpAMD64ORQ { 34591 break 34592 } 34593 y := or.Args[1] 34594 s0 := or.Args[0] 34595 if s0.Op != OpAMD64SHLQconst { 34596 break 34597 } 34598 j0 := s0.AuxInt 34599 x0 := s0.Args[0] 34600 if x0.Op != OpAMD64MOVWloadidx1 { 34601 break 34602 } 34603 i0 := x0.AuxInt 34604 if x0.Aux != s { 34605 break 34606 } 34607 _ = x0.Args[2] 34608 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34609 break 34610 } 34611 b = mergePoint(b, x0, x1, y) 34612 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34613 v.reset(OpCopy) 34614 v.AddArg(v0) 34615 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34616 v1.AuxInt = j0 34617 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34618 v2.AuxInt = i0 34619 v2.Aux = s 34620 v2.AddArg(p) 34621 v2.AddArg(idx) 34622 v2.AddArg(mem) 34623 v1.AddArg(v2) 34624 v0.AddArg(v1) 34625 v0.AddArg(y) 34626 return true 34627 } 34628 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 34629 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34630 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34631 for { 34632 _ = v.Args[1] 34633 s1 := v.Args[0] 34634 if s1.Op != OpAMD64SHLQconst { 34635 break 34636 } 34637 j1 := s1.AuxInt 34638 x1 := s1.Args[0] 34639 if x1.Op != OpAMD64MOVWloadidx1 { 34640 break 34641 } 34642 i1 := x1.AuxInt 34643 s := x1.Aux 34644 mem := x1.Args[2] 34645 p := x1.Args[0] 34646 idx := x1.Args[1] 34647 or := v.Args[1] 34648 if or.Op != OpAMD64ORQ { 34649 break 34650 } 34651 _ = or.Args[1] 34652 y := or.Args[0] 34653 s0 := or.Args[1] 34654 if s0.Op != OpAMD64SHLQconst { 34655 break 34656 } 34657 j0 := s0.AuxInt 34658 x0 := s0.Args[0] 34659 if x0.Op != OpAMD64MOVWloadidx1 { 34660 break 34661 } 34662 i0 := x0.AuxInt 34663 if x0.Aux != s { 34664 break 34665 } 34666 _ = x0.Args[2] 34667 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or)) { 34668 break 34669 } 34670 b = mergePoint(b, x0, x1, y) 34671 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34672 v.reset(OpCopy) 34673 v.AddArg(v0) 34674 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34675 v1.AuxInt = j0 34676 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34677 v2.AuxInt = i0 34678 v2.Aux = s 34679 v2.AddArg(p) 34680 v2.AddArg(idx) 34681 v2.AddArg(mem) 34682 v1.AddArg(v2) 34683 v0.AddArg(v1) 34684 v0.AddArg(y) 34685 return true 34686 } 34687 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 34688 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34689 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34690 for { 34691 _ = v.Args[1] 34692 s1 := v.Args[0] 34693 if s1.Op != OpAMD64SHLQconst { 34694 break 34695 } 34696 j1 := s1.AuxInt 34697 x1 := s1.Args[0] 34698 if x1.Op != OpAMD64MOVWloadidx1 { 34699 break 34700 } 34701 i1 := x1.AuxInt 34702 s := x1.Aux 34703 mem := x1.Args[2] 34704 idx := x1.Args[0] 34705 p := x1.Args[1] 34706 or := v.Args[1] 34707 if or.Op != OpAMD64ORQ { 34708 break 34709 } 34710 _ = or.Args[1] 34711 y := or.Args[0] 34712 s0 := or.Args[1] 34713 if s0.Op != OpAMD64SHLQconst { 34714 break 34715 } 34716 j0 := s0.AuxInt 34717 x0 := s0.Args[0] 34718 if x0.Op != OpAMD64MOVWloadidx1 { 34719 break 34720 } 34721 i0 := x0.AuxInt 34722 if x0.Aux != s { 34723 break 34724 } 34725 _ = x0.Args[2] 34726 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34727 break 34728 } 34729 b = mergePoint(b, x0, x1, y) 34730 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34731 v.reset(OpCopy) 34732 v.AddArg(v0) 34733 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34734 v1.AuxInt = j0 34735 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34736 v2.AuxInt = i0 34737 v2.Aux = s 34738 v2.AddArg(p) 34739 v2.AddArg(idx) 34740 v2.AddArg(mem) 34741 v1.AddArg(v2) 34742 v0.AddArg(v1) 34743 v0.AddArg(y) 34744 return true 34745 } 34746 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 34747 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34748 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34749 for { 34750 _ = v.Args[1] 34751 s1 := v.Args[0] 34752 if s1.Op != OpAMD64SHLQconst { 34753 break 34754 } 34755 j1 := s1.AuxInt 34756 x1 := s1.Args[0] 34757 if x1.Op != OpAMD64MOVWloadidx1 { 34758 break 34759 } 34760 i1 := x1.AuxInt 34761 s := x1.Aux 34762 mem := x1.Args[2] 34763 p := x1.Args[0] 34764 idx := x1.Args[1] 34765 or := v.Args[1] 34766 if or.Op != OpAMD64ORQ { 34767 break 34768 } 34769 _ = or.Args[1] 34770 y := or.Args[0] 34771 s0 := or.Args[1] 34772 if s0.Op != OpAMD64SHLQconst { 34773 break 34774 } 34775 
j0 := s0.AuxInt 34776 x0 := s0.Args[0] 34777 if x0.Op != OpAMD64MOVWloadidx1 { 34778 break 34779 } 34780 i0 := x0.AuxInt 34781 if x0.Aux != s { 34782 break 34783 } 34784 _ = x0.Args[2] 34785 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34786 break 34787 } 34788 b = mergePoint(b, x0, x1, y) 34789 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34790 v.reset(OpCopy) 34791 v.AddArg(v0) 34792 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34793 v1.AuxInt = j0 34794 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34795 v2.AuxInt = i0 34796 v2.Aux = s 34797 v2.AddArg(p) 34798 v2.AddArg(idx) 34799 v2.AddArg(mem) 34800 v1.AddArg(v2) 34801 v0.AddArg(v1) 34802 v0.AddArg(y) 34803 return true 34804 } 34805 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 34806 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34807 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34808 for { 34809 _ = v.Args[1] 34810 s1 := v.Args[0] 34811 if s1.Op != OpAMD64SHLQconst { 34812 break 34813 } 34814 j1 := s1.AuxInt 34815 x1 := s1.Args[0] 34816 if x1.Op != OpAMD64MOVWloadidx1 { 34817 break 34818 } 34819 i1 := x1.AuxInt 34820 s := x1.Aux 34821 mem := x1.Args[2] 34822 idx := x1.Args[0] 34823 p := x1.Args[1] 34824 or := v.Args[1] 34825 if or.Op != OpAMD64ORQ { 34826 break 34827 } 34828 _ = or.Args[1] 34829 y := or.Args[0] 34830 s0 := or.Args[1] 34831 if s0.Op != OpAMD64SHLQconst { 34832 break 34833 } 34834 j0 := s0.AuxInt 34835 x0 := s0.Args[0] 34836 if x0.Op != OpAMD64MOVWloadidx1 { 34837 break 34838 } 34839 i0 := x0.AuxInt 34840 if x0.Aux != s { 34841 break 34842 } 34843 _ = x0.Args[2] 34844 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34845 break 34846 } 34847 b = mergePoint(b, x0, x1, y) 34848 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34849 v.reset(OpCopy) 34850 v.AddArg(v0) 34851 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34852 v1.AuxInt = j0 34853 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34854 v2.AuxInt = i0 34855 v2.Aux = s 34856 v2.AddArg(p) 34857 v2.AddArg(idx) 34858 v2.AddArg(mem) 34859 v1.AddArg(v2) 34860 v0.AddArg(v1) 34861 v0.AddArg(y) 34862 return true 34863 } 34864 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 34865 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34866 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34867 for { 34868 _ = v.Args[1] 34869 or := v.Args[0] 34870 if or.Op != OpAMD64ORQ { 
34871 break 34872 } 34873 y := or.Args[1] 34874 s0 := or.Args[0] 34875 if s0.Op != OpAMD64SHLQconst { 34876 break 34877 } 34878 j0 := s0.AuxInt 34879 x0 := s0.Args[0] 34880 if x0.Op != OpAMD64MOVWloadidx1 { 34881 break 34882 } 34883 i0 := x0.AuxInt 34884 s := x0.Aux 34885 mem := x0.Args[2] 34886 p := x0.Args[0] 34887 idx := x0.Args[1] 34888 s1 := v.Args[1] 34889 if s1.Op != OpAMD64SHLQconst { 34890 break 34891 } 34892 j1 := s1.AuxInt 34893 x1 := s1.Args[0] 34894 if x1.Op != OpAMD64MOVWloadidx1 { 34895 break 34896 } 34897 i1 := x1.AuxInt 34898 if x1.Aux != s { 34899 break 34900 } 34901 _ = x1.Args[2] 34902 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34903 break 34904 } 34905 b = mergePoint(b, x0, x1, y) 34906 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34907 v.reset(OpCopy) 34908 v.AddArg(v0) 34909 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34910 v1.AuxInt = j0 34911 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34912 v2.AuxInt = i0 34913 v2.Aux = s 34914 v2.AddArg(p) 34915 v2.AddArg(idx) 34916 v2.AddArg(mem) 34917 v1.AddArg(v2) 34918 v0.AddArg(v1) 34919 v0.AddArg(y) 34920 return true 34921 } 34922 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 34923 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34924 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34925 for { 34926 _ = v.Args[1] 34927 or := v.Args[0] 34928 if or.Op != OpAMD64ORQ { 34929 break 34930 } 34931 y := or.Args[1] 34932 s0 := or.Args[0] 34933 if s0.Op != OpAMD64SHLQconst { 34934 break 34935 } 34936 j0 := s0.AuxInt 34937 x0 := s0.Args[0] 34938 if x0.Op != OpAMD64MOVWloadidx1 { 34939 break 34940 } 34941 i0 := x0.AuxInt 34942 s := x0.Aux 34943 mem := x0.Args[2] 34944 idx := x0.Args[0] 34945 p := x0.Args[1] 34946 s1 := v.Args[1] 34947 if s1.Op != OpAMD64SHLQconst { 34948 break 34949 } 34950 j1 := s1.AuxInt 34951 x1 := s1.Args[0] 34952 if x1.Op != OpAMD64MOVWloadidx1 { 34953 break 34954 } 34955 i1 := x1.AuxInt 34956 if x1.Aux != s { 34957 break 34958 } 34959 _ = x1.Args[2] 34960 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 34961 break 34962 } 34963 b = mergePoint(b, x0, x1, y) 34964 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34965 v.reset(OpCopy) 34966 v.AddArg(v0) 34967 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34968 v1.AuxInt = j0 34969 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34970 v2.AuxInt = i0 34971 v2.Aux = s 34972 v2.AddArg(p) 34973 v2.AddArg(idx) 34974 v2.AddArg(mem) 34975 v1.AddArg(v2) 34976 v0.AddArg(v1) 34977 v0.AddArg(y) 34978 return true 34979 } 34980 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 34981 // cond: i1 == i0+2 && j1 == j0+16 && 
j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 34982 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 34983 for { 34984 _ = v.Args[1] 34985 or := v.Args[0] 34986 if or.Op != OpAMD64ORQ { 34987 break 34988 } 34989 _ = or.Args[1] 34990 y := or.Args[0] 34991 s0 := or.Args[1] 34992 if s0.Op != OpAMD64SHLQconst { 34993 break 34994 } 34995 j0 := s0.AuxInt 34996 x0 := s0.Args[0] 34997 if x0.Op != OpAMD64MOVWloadidx1 { 34998 break 34999 } 35000 i0 := x0.AuxInt 35001 s := x0.Aux 35002 mem := x0.Args[2] 35003 p := x0.Args[0] 35004 idx := x0.Args[1] 35005 s1 := v.Args[1] 35006 if s1.Op != OpAMD64SHLQconst { 35007 break 35008 } 35009 j1 := s1.AuxInt 35010 x1 := s1.Args[0] 35011 if x1.Op != OpAMD64MOVWloadidx1 { 35012 break 35013 } 35014 i1 := x1.AuxInt 35015 if x1.Aux != s { 35016 break 35017 } 35018 _ = x1.Args[2] 35019 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35020 break 35021 } 35022 b = mergePoint(b, x0, x1, y) 35023 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 35024 v.reset(OpCopy) 35025 v.AddArg(v0) 35026 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 35027 v1.AuxInt = j0 35028 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 35029 v2.AuxInt = i0 35030 v2.Aux = s 35031 v2.AddArg(p) 35032 v2.AddArg(idx) 35033 v2.AddArg(mem) 35034 v1.AddArg(v2) 35035 v0.AddArg(v1) 35036 v0.AddArg(y) 35037 return true 35038 } 35039 return false 35040 } 35041 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { 35042 b := v.Block 35043 typ := &b.Func.Config.Types 35044 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 35045 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35046 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 35047 for { 35048 _ = v.Args[1] 35049 or := v.Args[0] 35050 if or.Op != OpAMD64ORQ { 35051 break 35052 } 35053 _ = or.Args[1] 35054 y := or.Args[0] 35055 s0 := or.Args[1] 35056 if s0.Op != OpAMD64SHLQconst { 35057 break 35058 } 35059 j0 := s0.AuxInt 35060 x0 := s0.Args[0] 35061 if x0.Op != OpAMD64MOVWloadidx1 { 35062 break 35063 } 35064 i0 := x0.AuxInt 35065 s := x0.Aux 35066 mem := x0.Args[2] 35067 idx := x0.Args[0] 35068 p := x0.Args[1] 35069 s1 := v.Args[1] 35070 if s1.Op != OpAMD64SHLQconst { 35071 break 35072 } 35073 j1 := s1.AuxInt 35074 x1 := s1.Args[0] 35075 if x1.Op != OpAMD64MOVWloadidx1 { 35076 break 35077 } 35078 i1 := x1.AuxInt 35079 if x1.Aux != s { 35080 break 35081 } 35082 _ = x1.Args[2] 35083 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35084 break 35085 } 35086 b = mergePoint(b, x0, x1, y) 35087 v0 := 
b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 35088 v.reset(OpCopy) 35089 v.AddArg(v0) 35090 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 35091 v1.AuxInt = j0 35092 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 35093 v2.AuxInt = i0 35094 v2.Aux = s 35095 v2.AddArg(p) 35096 v2.AddArg(idx) 35097 v2.AddArg(mem) 35098 v1.AddArg(v2) 35099 v0.AddArg(v1) 35100 v0.AddArg(y) 35101 return true 35102 } 35103 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 35104 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35105 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 35106 for { 35107 _ = v.Args[1] 35108 or := v.Args[0] 35109 if or.Op != OpAMD64ORQ { 35110 break 35111 } 35112 y := or.Args[1] 35113 s0 := or.Args[0] 35114 if s0.Op != OpAMD64SHLQconst { 35115 break 35116 } 35117 j0 := s0.AuxInt 35118 x0 := s0.Args[0] 35119 if x0.Op != OpAMD64MOVWloadidx1 { 35120 break 35121 } 35122 i0 := x0.AuxInt 35123 s := x0.Aux 35124 mem := x0.Args[2] 35125 p := x0.Args[0] 35126 idx := x0.Args[1] 35127 s1 := v.Args[1] 35128 if s1.Op != OpAMD64SHLQconst { 35129 break 35130 } 35131 j1 := s1.AuxInt 35132 x1 := s1.Args[0] 35133 if x1.Op != OpAMD64MOVWloadidx1 { 35134 break 35135 } 35136 i1 := x1.AuxInt 35137 if x1.Aux != s { 35138 break 35139 } 35140 _ = x1.Args[2] 35141 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35142 break 35143 } 35144 b = mergePoint(b, x0, x1, y) 35145 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 35146 v.reset(OpCopy) 35147 v.AddArg(v0) 35148 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 35149 v1.AuxInt = j0 35150 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 35151 v2.AuxInt = i0 35152 v2.Aux = s 35153 v2.AddArg(p) 35154 v2.AddArg(idx) 35155 v2.AddArg(mem) 35156 v1.AddArg(v2) 35157 v0.AddArg(v1) 35158 v0.AddArg(y) 35159 return true 35160 } 35161 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 35162 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35163 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 35164 for { 35165 _ = v.Args[1] 35166 or := v.Args[0] 35167 if or.Op != OpAMD64ORQ { 35168 break 35169 } 35170 y := or.Args[1] 35171 s0 := or.Args[0] 35172 if s0.Op != OpAMD64SHLQconst { 35173 break 35174 } 35175 j0 := s0.AuxInt 35176 x0 := s0.Args[0] 35177 if x0.Op != OpAMD64MOVWloadidx1 { 35178 break 35179 } 35180 i0 := x0.AuxInt 35181 s := x0.Aux 35182 mem := x0.Args[2] 35183 idx := x0.Args[0] 35184 p := x0.Args[1] 35185 s1 := v.Args[1] 35186 if s1.Op != OpAMD64SHLQconst { 35187 break 35188 } 35189 j1 := s1.AuxInt 35190 x1 := s1.Args[0] 35191 if x1.Op != OpAMD64MOVWloadidx1 { 35192 break 35193 } 35194 i1 := x1.AuxInt 35195 if x1.Aux != s { 35196 break 
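// The MOVBload/ROLWconst rules further below switch from little- to
// big-endian combining: a pair of adjacent byte loads ORed in
// descending significance (the lower address shifted left by 8)
// becomes a 16-bit load plus a byte swap (ROLWconst [8]), and two
// such swapped halves then become a BSWAPL of a single 32-bit load.
// A hypothetical source-level shape (illustrative only, not from
// this file):
//
//	// loadBE16 reads a big-endian uint16; under these rules the
//	// compiler emits one MOVWload followed by ROLW $8 instead of
//	// two separate byte loads.
//	func loadBE16(b []byte, i int) uint64 {
//		return uint64(b[i])<<8 | uint64(b[i+1])
//	}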
35197 } 35198 _ = x1.Args[2] 35199 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35200 break 35201 } 35202 b = mergePoint(b, x0, x1, y) 35203 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 35204 v.reset(OpCopy) 35205 v.AddArg(v0) 35206 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 35207 v1.AuxInt = j0 35208 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 35209 v2.AuxInt = i0 35210 v2.Aux = s 35211 v2.AddArg(p) 35212 v2.AddArg(idx) 35213 v2.AddArg(mem) 35214 v1.AddArg(v2) 35215 v0.AddArg(v1) 35216 v0.AddArg(y) 35217 return true 35218 } 35219 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 35220 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35221 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 35222 for { 35223 _ = v.Args[1] 35224 or := v.Args[0] 35225 if or.Op != OpAMD64ORQ { 35226 break 35227 } 35228 _ = or.Args[1] 35229 y := or.Args[0] 35230 s0 := or.Args[1] 35231 if s0.Op != OpAMD64SHLQconst { 35232 break 35233 } 35234 j0 := s0.AuxInt 35235 x0 := s0.Args[0] 35236 if x0.Op != OpAMD64MOVWloadidx1 { 35237 break 35238 } 35239 i0 := x0.AuxInt 35240 s := x0.Aux 35241 mem := x0.Args[2] 35242 p := x0.Args[0] 35243 idx := x0.Args[1] 35244 s1 := v.Args[1] 35245 if s1.Op != OpAMD64SHLQconst { 35246 break 35247 } 35248 j1 := s1.AuxInt 35249 x1 := s1.Args[0] 35250 if x1.Op != OpAMD64MOVWloadidx1 { 35251 break 35252 } 35253 i1 := x1.AuxInt 35254 if x1.Aux != s { 35255 break 35256 } 35257 _ = x1.Args[2] 35258 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35259 break 35260 } 35261 b = mergePoint(b, x0, x1, y) 35262 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 35263 v.reset(OpCopy) 35264 v.AddArg(v0) 35265 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 35266 v1.AuxInt = j0 35267 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 35268 v2.AuxInt = i0 35269 v2.Aux = s 35270 v2.AddArg(p) 35271 v2.AddArg(idx) 35272 v2.AddArg(mem) 35273 v1.AddArg(v2) 35274 v0.AddArg(v1) 35275 v0.AddArg(y) 35276 return true 35277 } 35278 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 35279 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35280 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 35281 for { 35282 _ = v.Args[1] 35283 or := v.Args[0] 35284 if or.Op != OpAMD64ORQ { 35285 break 35286 } 35287 _ = or.Args[1] 35288 y := or.Args[0] 35289 s0 := or.Args[1] 35290 if s0.Op != OpAMD64SHLQconst { 35291 break 35292 } 35293 j0 := 
s0.AuxInt 35294 x0 := s0.Args[0] 35295 if x0.Op != OpAMD64MOVWloadidx1 { 35296 break 35297 } 35298 i0 := x0.AuxInt 35299 s := x0.Aux 35300 mem := x0.Args[2] 35301 idx := x0.Args[0] 35302 p := x0.Args[1] 35303 s1 := v.Args[1] 35304 if s1.Op != OpAMD64SHLQconst { 35305 break 35306 } 35307 j1 := s1.AuxInt 35308 x1 := s1.Args[0] 35309 if x1.Op != OpAMD64MOVWloadidx1 { 35310 break 35311 } 35312 i1 := x1.AuxInt 35313 if x1.Aux != s { 35314 break 35315 } 35316 _ = x1.Args[2] 35317 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35318 break 35319 } 35320 b = mergePoint(b, x0, x1, y) 35321 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 35322 v.reset(OpCopy) 35323 v.AddArg(v0) 35324 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 35325 v1.AuxInt = j0 35326 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 35327 v2.AuxInt = i0 35328 v2.Aux = s 35329 v2.AddArg(p) 35330 v2.AddArg(idx) 35331 v2.AddArg(mem) 35332 v1.AddArg(v2) 35333 v0.AddArg(v1) 35334 v0.AddArg(y) 35335 return true 35336 } 35337 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 35338 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 35339 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 35340 for { 35341 _ = v.Args[1] 35342 x1 := v.Args[0] 35343 if x1.Op != OpAMD64MOVBload { 35344 break 35345 } 35346 i1 := x1.AuxInt 35347 s := x1.Aux 35348 mem := x1.Args[1] 35349 p := x1.Args[0] 35350 sh := v.Args[1] 35351 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 35352 break 35353 } 35354 x0 := sh.Args[0] 35355 if x0.Op != OpAMD64MOVBload { 35356 break 35357 } 35358 i0 := x0.AuxInt 35359 if x0.Aux != s { 35360 break 35361 } 35362 _ = x0.Args[1] 35363 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 35364 break 35365 } 35366 b = mergePoint(b, x0, x1) 35367 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) 35368 v.reset(OpCopy) 35369 v.AddArg(v0) 35370 v0.AuxInt = 8 35371 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 35372 v1.AuxInt = i0 35373 v1.Aux = s 35374 v1.AddArg(p) 35375 v1.AddArg(mem) 35376 v0.AddArg(v1) 35377 return true 35378 } 35379 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 35380 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 35381 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 35382 for { 35383 _ = v.Args[1] 35384 sh := v.Args[0] 35385 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 35386 break 35387 } 35388 x0 := sh.Args[0] 35389 if x0.Op != OpAMD64MOVBload { 35390 break 35391 } 35392 i0 := x0.AuxInt 35393 s := x0.Aux 35394 mem := x0.Args[1] 35395 p := x0.Args[0] 35396 x1 := v.Args[1] 35397 if x1.Op != OpAMD64MOVBload { 35398 break 35399 } 35400 i1 := x1.AuxInt 35401 if x1.Aux != s { 35402 break 35403 } 35404 _ = x1.Args[1] 35405 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && 
clobber(x1) && clobber(sh)) { 35406 break 35407 } 35408 b = mergePoint(b, x0, x1) 35409 v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type) 35410 v.reset(OpCopy) 35411 v.AddArg(v0) 35412 v0.AuxInt = 8 35413 v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 35414 v1.AuxInt = i0 35415 v1.Aux = s 35416 v1.AddArg(p) 35417 v1.AddArg(mem) 35418 v0.AddArg(v1) 35419 return true 35420 } 35421 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 35422 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 35423 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 35424 for { 35425 _ = v.Args[1] 35426 r1 := v.Args[0] 35427 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 35428 break 35429 } 35430 x1 := r1.Args[0] 35431 if x1.Op != OpAMD64MOVWload { 35432 break 35433 } 35434 i1 := x1.AuxInt 35435 s := x1.Aux 35436 mem := x1.Args[1] 35437 p := x1.Args[0] 35438 sh := v.Args[1] 35439 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 35440 break 35441 } 35442 r0 := sh.Args[0] 35443 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 35444 break 35445 } 35446 x0 := r0.Args[0] 35447 if x0.Op != OpAMD64MOVWload { 35448 break 35449 } 35450 i0 := x0.AuxInt 35451 if x0.Aux != s { 35452 break 35453 } 35454 _ = x0.Args[1] 35455 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 35456 break 35457 } 35458 b = mergePoint(b, x0, x1) 35459 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) 35460 v.reset(OpCopy) 35461 v.AddArg(v0) 35462 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 35463 v1.AuxInt = i0 35464 v1.Aux = s 35465 v1.AddArg(p) 35466 v1.AddArg(mem) 35467 v0.AddArg(v1) 35468 return true 35469 } 35470 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 35471 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 35472 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 35473 for { 35474 _ = v.Args[1] 35475 sh := v.Args[0] 35476 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 35477 break 35478 } 35479 r0 := sh.Args[0] 35480 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 35481 break 35482 } 35483 x0 := r0.Args[0] 35484 if x0.Op != OpAMD64MOVWload { 35485 break 35486 } 35487 i0 := x0.AuxInt 35488 s := x0.Aux 35489 mem := x0.Args[1] 35490 p := x0.Args[0] 35491 r1 := v.Args[1] 35492 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 35493 break 35494 } 35495 x1 := r1.Args[0] 35496 if x1.Op != OpAMD64MOVWload { 35497 break 35498 } 35499 i1 := x1.AuxInt 35500 if x1.Aux != s { 35501 break 35502 } 35503 _ = x1.Args[1] 35504 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 35505 break 35506 } 35507 b = mergePoint(b, x0, x1) 35508 v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type) 35509 v.reset(OpCopy) 35510 v.AddArg(v0) 35511 v1 := 
b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 35512 v1.AuxInt = i0 35513 v1.Aux = s 35514 v1.AddArg(p) 35515 v1.AddArg(mem) 35516 v0.AddArg(v1) 35517 return true 35518 } 35519 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 35520 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 35521 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 35522 for { 35523 _ = v.Args[1] 35524 r1 := v.Args[0] 35525 if r1.Op != OpAMD64BSWAPL { 35526 break 35527 } 35528 x1 := r1.Args[0] 35529 if x1.Op != OpAMD64MOVLload { 35530 break 35531 } 35532 i1 := x1.AuxInt 35533 s := x1.Aux 35534 mem := x1.Args[1] 35535 p := x1.Args[0] 35536 sh := v.Args[1] 35537 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { 35538 break 35539 } 35540 r0 := sh.Args[0] 35541 if r0.Op != OpAMD64BSWAPL { 35542 break 35543 } 35544 x0 := r0.Args[0] 35545 if x0.Op != OpAMD64MOVLload { 35546 break 35547 } 35548 i0 := x0.AuxInt 35549 if x0.Aux != s { 35550 break 35551 } 35552 _ = x0.Args[1] 35553 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 35554 break 35555 } 35556 b = mergePoint(b, x0, x1) 35557 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) 35558 v.reset(OpCopy) 35559 v.AddArg(v0) 35560 v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) 35561 v1.AuxInt = i0 35562 v1.Aux = s 35563 v1.AddArg(p) 35564 v1.AddArg(mem) 35565 v0.AddArg(v1) 35566 return true 35567 } 35568 return false 35569 } 35570 func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { 35571 b := v.Block 35572 typ := &b.Func.Config.Types 35573 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 35574 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 35575 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 35576 for { 35577 _ = v.Args[1] 35578 sh := v.Args[0] 35579 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { 35580 break 35581 } 35582 r0 := sh.Args[0] 35583 if r0.Op != OpAMD64BSWAPL { 35584 break 35585 } 35586 x0 := r0.Args[0] 35587 if x0.Op != OpAMD64MOVLload { 35588 break 35589 } 35590 i0 := x0.AuxInt 35591 s := x0.Aux 35592 mem := x0.Args[1] 35593 p := x0.Args[0] 35594 r1 := v.Args[1] 35595 if r1.Op != OpAMD64BSWAPL { 35596 break 35597 } 35598 x1 := r1.Args[0] 35599 if x1.Op != OpAMD64MOVLload { 35600 break 35601 } 35602 i1 := x1.AuxInt 35603 if x1.Aux != s { 35604 break 35605 } 35606 _ = x1.Args[1] 35607 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 35608 break 35609 } 35610 b = mergePoint(b, x0, x1) 35611 v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPQ, v.Type) 35612 v.reset(OpCopy) 35613 v.AddArg(v0) 35614 v1 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) 35615 v1.AuxInt = i0 35616 v1.Aux = s 35617 v1.AddArg(p) 35618 v1.AddArg(mem) 35619 v0.AddArg(v1) 35620 return true 35621 } 35622 // 
match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 35623 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35624 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 35625 for { 35626 _ = v.Args[1] 35627 s0 := v.Args[0] 35628 if s0.Op != OpAMD64SHLQconst { 35629 break 35630 } 35631 j0 := s0.AuxInt 35632 x0 := s0.Args[0] 35633 if x0.Op != OpAMD64MOVBload { 35634 break 35635 } 35636 i0 := x0.AuxInt 35637 s := x0.Aux 35638 mem := x0.Args[1] 35639 p := x0.Args[0] 35640 or := v.Args[1] 35641 if or.Op != OpAMD64ORQ { 35642 break 35643 } 35644 y := or.Args[1] 35645 s1 := or.Args[0] 35646 if s1.Op != OpAMD64SHLQconst { 35647 break 35648 } 35649 j1 := s1.AuxInt 35650 x1 := s1.Args[0] 35651 if x1.Op != OpAMD64MOVBload { 35652 break 35653 } 35654 i1 := x1.AuxInt 35655 if x1.Aux != s { 35656 break 35657 } 35658 _ = x1.Args[1] 35659 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35660 break 35661 } 35662 b = mergePoint(b, x0, x1, y) 35663 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 35664 v.reset(OpCopy) 35665 v.AddArg(v0) 35666 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 35667 v1.AuxInt = j1 35668 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) 35669 v2.AuxInt = 8 35670 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 35671 v3.AuxInt = i0 35672 v3.Aux = s 35673 v3.AddArg(p) 35674 v3.AddArg(mem) 35675 v2.AddArg(v3) 35676 v1.AddArg(v2) 35677 v0.AddArg(v1) 35678 v0.AddArg(y) 35679 return true 35680 } 35681 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 35682 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35683 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 35684 for { 35685 _ = v.Args[1] 35686 s0 := v.Args[0] 35687 if s0.Op != OpAMD64SHLQconst { 35688 break 35689 } 35690 j0 := s0.AuxInt 35691 x0 := s0.Args[0] 35692 if x0.Op != OpAMD64MOVBload { 35693 break 35694 } 35695 i0 := x0.AuxInt 35696 s := x0.Aux 35697 mem := x0.Args[1] 35698 p := x0.Args[0] 35699 or := v.Args[1] 35700 if or.Op != OpAMD64ORQ { 35701 break 35702 } 35703 _ = or.Args[1] 35704 y := or.Args[0] 35705 s1 := or.Args[1] 35706 if s1.Op != OpAMD64SHLQconst { 35707 break 35708 } 35709 j1 := s1.AuxInt 35710 x1 := s1.Args[0] 35711 if x1.Op != OpAMD64MOVBload { 35712 break 35713 } 35714 i1 := x1.AuxInt 35715 if x1.Aux != s { 35716 break 35717 } 35718 _ = x1.Args[1] 35719 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35720 break 35721 } 35722 b = mergePoint(b, x0, x1, y) 35723 v0 := 
b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 35724 v.reset(OpCopy) 35725 v.AddArg(v0) 35726 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 35727 v1.AuxInt = j1 35728 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) 35729 v2.AuxInt = 8 35730 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) 35731 v3.AuxInt = i0 35732 v3.Aux = s 35733 v3.AddArg(p) 35734 v3.AddArg(mem) 35735 v2.AddArg(v3) 35736 v1.AddArg(v2) 35737 v0.AddArg(v1) 35738 v0.AddArg(y) 35739 return true 35740 } 35741 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 35742 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35743 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 35744 for { 35745 _ = v.Args[1] 35746 or := v.Args[0] 35747 if or.Op != OpAMD64ORQ { 35748 break 35749 } 35750 y := or.Args[1] 35751 s1 := or.Args[0] 35752 if s1.Op != OpAMD64SHLQconst { 35753 break 35754 } 35755 j1 := s1.AuxInt 35756 x1 := s1.Args[0] 35757 if x1.Op != OpAMD64MOVBload { 35758 break 35759 } 35760 i1 := x1.AuxInt 35761 s := x1.Aux 35762 mem := x1.Args[1] 35763 p := x1.Args[0] 35764 s0 := v.Args[1] 35765 if s0.Op != OpAMD64SHLQconst { 35766 break 35767 } 35768 j0 := s0.AuxInt 35769 x0 := s0.Args[0] 35770 if x0.Op != OpAMD64MOVBload { 35771 break 35772 } 35773 i0 := x0.AuxInt 35774 if x0.Aux != s { 35775 break 35776 } 35777 _ = x0.Args[1] 35778 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35779 break 35780 } 35781 b = mergePoint(b, x0, x1, y) 35782 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 35783 v.reset(OpCopy) 35784 v.AddArg(v0) 35785 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 35786 v1.AuxInt = j1 35787 v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) 35788 v2.AuxInt = 8 35789 v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 35790 v3.AuxInt = i0 35791 v3.Aux = s 35792 v3.AddArg(p) 35793 v3.AddArg(mem) 35794 v2.AddArg(v3) 35795 v1.AddArg(v2) 35796 v0.AddArg(v1) 35797 v0.AddArg(y) 35798 return true 35799 } 35800 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 35801 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 35802 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 35803 for { 35804 _ = v.Args[1] 35805 or := v.Args[0] 35806 if or.Op != OpAMD64ORQ { 35807 break 35808 } 35809 _ = or.Args[1] 35810 y := or.Args[0] 35811 s1 := or.Args[1] 35812 if s1.Op != OpAMD64SHLQconst { 35813 break 35814 } 35815 j1 := s1.AuxInt 35816 x1 := s1.Args[0] 35817 if x1.Op != OpAMD64MOVBload { 35818 break 35819 } 35820 i1 := x1.AuxInt 35821 s := x1.Aux 35822 mem := x1.Args[1] 35823 p := x1.Args[0] 35824 s0 := v.Args[1] 35825 if s0.Op != OpAMD64SHLQconst { 35826 break 35827 } 35828 j0 := s0.AuxInt 35829 x0 := s0.Args[0] 35830 if 
x0.Op != OpAMD64MOVBload { 35831 break 35832 } 35833 i0 := x0.AuxInt 35834 if x0.Aux != s { 35835 break 35836 } 35837 _ = x0.Args[1] 35838 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 35839 break 35840 } 35841 b = mergePoint(b, x0, x1, y) 35842 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 35843 v.reset(OpCopy) 35844 v.AddArg(v0) 35845 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 35846 v1.AuxInt = j1 35847 v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) 35848 v2.AuxInt = 8 35849 v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) 35850 v3.AuxInt = i0 35851 v3.Aux = s 35852 v3.AddArg(p) 35853 v3.AddArg(mem) 35854 v2.AddArg(v3) 35855 v1.AddArg(v2) 35856 v0.AddArg(v1) 35857 v0.AddArg(y) 35858 return true 35859 } 35860 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 35861 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 35862 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 35863 for { 35864 _ = v.Args[1] 35865 s0 := v.Args[0] 35866 if s0.Op != OpAMD64SHLQconst { 35867 break 35868 } 35869 j0 := s0.AuxInt 35870 r0 := s0.Args[0] 35871 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 35872 break 35873 } 35874 x0 := r0.Args[0] 35875 if x0.Op != OpAMD64MOVWload { 35876 break 35877 } 35878 i0 := x0.AuxInt 35879 s := x0.Aux 35880 mem := x0.Args[1] 35881 p := x0.Args[0] 35882 or := v.Args[1] 35883 if or.Op != OpAMD64ORQ { 35884 break 35885 } 35886 y := or.Args[1] 35887 s1 := or.Args[0] 35888 if s1.Op != OpAMD64SHLQconst { 35889 break 35890 } 35891 j1 := s1.AuxInt 35892 r1 := s1.Args[0] 35893 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 35894 break 35895 } 35896 x1 := r1.Args[0] 35897 if x1.Op != OpAMD64MOVWload { 35898 break 35899 } 35900 i1 := x1.AuxInt 35901 if x1.Aux != s { 35902 break 35903 } 35904 _ = x1.Args[1] 35905 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 35906 break 35907 } 35908 b = mergePoint(b, x0, x1, y) 35909 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 35910 v.reset(OpCopy) 35911 v.AddArg(v0) 35912 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 35913 v1.AuxInt = j1 35914 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) 35915 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 35916 v3.AuxInt = i0 35917 v3.Aux = s 35918 v3.AddArg(p) 35919 v3.AddArg(mem) 35920 v2.AddArg(v3) 35921 v1.AddArg(v2) 35922 v0.AddArg(v1) 35923 v0.AddArg(y) 35924 return true 35925 } 35926 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 35927 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 35928 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 35929 for { 35930 _ = v.Args[1] 35931 s0 := v.Args[0] 35932 if s0.Op != OpAMD64SHLQconst { 35933 break 35934 } 35935 j0 := s0.AuxInt 35936 r0 := s0.Args[0] 35937 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 35938 break 35939 } 35940 x0 := r0.Args[0] 35941 if x0.Op != OpAMD64MOVWload { 35942 break 35943 } 35944 i0 := x0.AuxInt 35945 s := x0.Aux 35946 mem := x0.Args[1] 35947 p := x0.Args[0] 35948 or := v.Args[1] 35949 if or.Op != OpAMD64ORQ { 35950 break 35951 } 35952 _ = or.Args[1] 35953 y := or.Args[0] 35954 s1 := or.Args[1] 35955 if s1.Op != OpAMD64SHLQconst { 35956 break 35957 } 35958 j1 := s1.AuxInt 35959 r1 := s1.Args[0] 35960 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 35961 break 35962 } 35963 x1 := r1.Args[0] 35964 if x1.Op != OpAMD64MOVWload { 35965 break 35966 } 35967 i1 := x1.AuxInt 35968 if x1.Aux != s { 35969 break 35970 } 35971 _ = x1.Args[1] 35972 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 35973 break 35974 } 35975 b = mergePoint(b, x0, x1, y) 35976 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) 35977 v.reset(OpCopy) 35978 v.AddArg(v0) 35979 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) 35980 v1.AuxInt = j1 35981 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) 35982 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) 35983 v3.AuxInt = i0 35984 v3.Aux = s 35985 v3.AddArg(p) 35986 v3.AddArg(mem) 35987 v2.AddArg(v3) 35988 v1.AddArg(v2) 35989 v0.AddArg(v1) 35990 v0.AddArg(y) 35991 return true 35992 } 35993 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 35994 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 35995 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 35996 for { 35997 _ = v.Args[1] 35998 or := v.Args[0] 35999 if or.Op != OpAMD64ORQ { 36000 break 36001 } 36002 y := or.Args[1] 36003 s1 := or.Args[0] 36004 if s1.Op != OpAMD64SHLQconst { 36005 break 36006 } 36007 j1 := s1.AuxInt 36008 r1 := s1.Args[0] 36009 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 36010 break 36011 } 36012 x1 := r1.Args[0] 36013 if x1.Op != OpAMD64MOVWload { 36014 break 36015 } 36016 i1 := x1.AuxInt 36017 s := x1.Aux 36018 mem := x1.Args[1] 36019 p := x1.Args[0] 36020 s0 := v.Args[1] 36021 if s0.Op != OpAMD64SHLQconst { 36022 break 36023 } 36024 j0 := s0.AuxInt 36025 r0 := s0.Args[0] 36026 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36027 break 36028 } 36029 x0 := r0.Args[0] 36030 if x0.Op != OpAMD64MOVWload { 36031 break 36032 } 36033 i0 := x0.AuxInt 36034 if x0.Aux != s { 36035 break 36036 } 
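// The checks below pin down the second halfword: both loads must share the
// same symbol, base, and memory at adjacent offsets (i1 == i0+2), the shifts
// must be adjacent (j1 == j0-16), and every intermediate value must be
// single-use so the matched values can be clobbered at a valid merge point.
// When the rule fires, two byte-swapped 16-bit loads fuse into one
// byte-swapped 32-bit load. A hypothetical source-level sketch of code that
// reaches this shape (identifiers are illustrative, assuming encoding/binary
// is imported):
//
//	hi := uint64(binary.BigEndian.Uint16(buf[i:])) << j0
//	lo := uint64(binary.BigEndian.Uint16(buf[i+2:])) << (j0 - 16)
//	acc := rest | hi | lo // rewritten to one SHLQconst of BSWAPL(MOVLload)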
36037 _ = x0.Args[1] 36038 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 36039 break 36040 } 36041 b = mergePoint(b, x0, x1, y) 36042 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 36043 v.reset(OpCopy) 36044 v.AddArg(v0) 36045 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 36046 v1.AuxInt = j1 36047 v2 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32) 36048 v3 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 36049 v3.AuxInt = i0 36050 v3.Aux = s 36051 v3.AddArg(p) 36052 v3.AddArg(mem) 36053 v2.AddArg(v3) 36054 v1.AddArg(v2) 36055 v0.AddArg(v1) 36056 v0.AddArg(y) 36057 return true 36058 } 36059 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 36060 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 36061 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 36062 for { 36063 _ = v.Args[1] 36064 or := v.Args[0] 36065 if or.Op != OpAMD64ORQ { 36066 break 36067 } 36068 _ = or.Args[1] 36069 y := or.Args[0] 36070 s1 := or.Args[1] 36071 if s1.Op != OpAMD64SHLQconst { 36072 break 36073 } 36074 j1 := s1.AuxInt 36075 r1 := s1.Args[0] 36076 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 36077 break 36078 } 36079 x1 := r1.Args[0] 36080 if x1.Op != OpAMD64MOVWload { 36081 break 36082 } 36083 i1 := x1.AuxInt 36084 s := x1.Aux 36085 mem := x1.Args[1] 36086 p := x1.Args[0] 36087 s0 := v.Args[1] 36088 if s0.Op != OpAMD64SHLQconst { 36089 break 36090 } 36091 j0 := s0.AuxInt 36092 r0 := s0.Args[0] 36093 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36094 break 36095 } 36096 x0 := r0.Args[0] 36097 if x0.Op != OpAMD64MOVWload { 36098 break 36099 } 36100 i0 := x0.AuxInt 36101 if x0.Aux != s { 36102 break 36103 } 36104 _ = x0.Args[1] 36105 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 36106 break 36107 } 36108 b = mergePoint(b, x0, x1, y) 36109 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) 36110 v.reset(OpCopy) 36111 v.AddArg(v0) 36112 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) 36113 v1.AuxInt = j1 36114 v2 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32) 36115 v3 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) 36116 v3.AuxInt = i0 36117 v3.Aux = s 36118 v3.AddArg(p) 36119 v3.AddArg(mem) 36120 v2.AddArg(v3) 36121 v1.AddArg(v2) 36122 v0.AddArg(v1) 36123 v0.AddArg(y) 36124 return true 36125 } 36126 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 36127 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36128 // result: 
@mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 36129 for { 36130 _ = v.Args[1] 36131 x1 := v.Args[0] 36132 if x1.Op != OpAMD64MOVBloadidx1 { 36133 break 36134 } 36135 i1 := x1.AuxInt 36136 s := x1.Aux 36137 mem := x1.Args[2] 36138 p := x1.Args[0] 36139 idx := x1.Args[1] 36140 sh := v.Args[1] 36141 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 36142 break 36143 } 36144 x0 := sh.Args[0] 36145 if x0.Op != OpAMD64MOVBloadidx1 { 36146 break 36147 } 36148 i0 := x0.AuxInt 36149 if x0.Aux != s { 36150 break 36151 } 36152 _ = x0.Args[2] 36153 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36154 break 36155 } 36156 b = mergePoint(b, x0, x1) 36157 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 36158 v.reset(OpCopy) 36159 v.AddArg(v0) 36160 v0.AuxInt = 8 36161 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 36162 v1.AuxInt = i0 36163 v1.Aux = s 36164 v1.AddArg(p) 36165 v1.AddArg(idx) 36166 v1.AddArg(mem) 36167 v0.AddArg(v1) 36168 return true 36169 } 36170 return false 36171 } 36172 func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool { 36173 b := v.Block 36174 typ := &b.Func.Config.Types 36175 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 36176 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36177 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 36178 for { 36179 _ = v.Args[1] 36180 x1 := v.Args[0] 36181 if x1.Op != OpAMD64MOVBloadidx1 { 36182 break 36183 } 36184 i1 := x1.AuxInt 36185 s := x1.Aux 36186 mem := x1.Args[2] 36187 idx := x1.Args[0] 36188 p := x1.Args[1] 36189 sh := v.Args[1] 36190 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 36191 break 36192 } 36193 x0 := sh.Args[0] 36194 if x0.Op != OpAMD64MOVBloadidx1 { 36195 break 36196 } 36197 i0 := x0.AuxInt 36198 if x0.Aux != s { 36199 break 36200 } 36201 _ = x0.Args[2] 36202 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36203 break 36204 } 36205 b = mergePoint(b, x0, x1) 36206 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 36207 v.reset(OpCopy) 36208 v.AddArg(v0) 36209 v0.AuxInt = 8 36210 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 36211 v1.AuxInt = i0 36212 v1.Aux = s 36213 v1.AddArg(p) 36214 v1.AddArg(idx) 36215 v1.AddArg(mem) 36216 v0.AddArg(v1) 36217 return true 36218 } 36219 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 36220 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36221 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 36222 for { 36223 _ = v.Args[1] 36224 x1 := v.Args[0] 36225 if x1.Op != OpAMD64MOVBloadidx1 { 36226 break 36227 } 36228 i1 := x1.AuxInt 36229 s := x1.Aux 36230 mem := x1.Args[2] 36231 p := x1.Args[0] 36232 idx := x1.Args[1] 36233 sh := v.Args[1] 36234 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 36235 break 36236 } 36237 x0 := sh.Args[0] 36238 if x0.Op != OpAMD64MOVBloadidx1 { 36239 break 36240 } 36241 i0 := x0.AuxInt 36242 if x0.Aux != s { 
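// Merging is only sound when both byte loads refer to the same symbol; a
// mismatched Aux aborts the rule.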
36243 break 36244 } 36245 _ = x0.Args[2] 36246 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36247 break 36248 } 36249 b = mergePoint(b, x0, x1) 36250 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 36251 v.reset(OpCopy) 36252 v.AddArg(v0) 36253 v0.AuxInt = 8 36254 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 36255 v1.AuxInt = i0 36256 v1.Aux = s 36257 v1.AddArg(p) 36258 v1.AddArg(idx) 36259 v1.AddArg(mem) 36260 v0.AddArg(v1) 36261 return true 36262 } 36263 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 36264 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36265 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 36266 for { 36267 _ = v.Args[1] 36268 x1 := v.Args[0] 36269 if x1.Op != OpAMD64MOVBloadidx1 { 36270 break 36271 } 36272 i1 := x1.AuxInt 36273 s := x1.Aux 36274 mem := x1.Args[2] 36275 idx := x1.Args[0] 36276 p := x1.Args[1] 36277 sh := v.Args[1] 36278 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 36279 break 36280 } 36281 x0 := sh.Args[0] 36282 if x0.Op != OpAMD64MOVBloadidx1 { 36283 break 36284 } 36285 i0 := x0.AuxInt 36286 if x0.Aux != s { 36287 break 36288 } 36289 _ = x0.Args[2] 36290 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36291 break 36292 } 36293 b = mergePoint(b, x0, x1) 36294 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 36295 v.reset(OpCopy) 36296 v.AddArg(v0) 36297 v0.AuxInt = 8 36298 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 36299 v1.AuxInt = i0 36300 v1.Aux = s 36301 v1.AddArg(p) 36302 v1.AddArg(idx) 36303 v1.AddArg(mem) 36304 v0.AddArg(v1) 36305 return true 36306 } 36307 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 36308 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36309 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 36310 for { 36311 _ = v.Args[1] 36312 sh := v.Args[0] 36313 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 36314 break 36315 } 36316 x0 := sh.Args[0] 36317 if x0.Op != OpAMD64MOVBloadidx1 { 36318 break 36319 } 36320 i0 := x0.AuxInt 36321 s := x0.Aux 36322 mem := x0.Args[2] 36323 p := x0.Args[0] 36324 idx := x0.Args[1] 36325 x1 := v.Args[1] 36326 if x1.Op != OpAMD64MOVBloadidx1 { 36327 break 36328 } 36329 i1 := x1.AuxInt 36330 if x1.Aux != s { 36331 break 36332 } 36333 _ = x1.Args[2] 36334 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36335 break 36336 } 36337 b = mergePoint(b, x0, x1) 36338 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 36339 v.reset(OpCopy) 36340 v.AddArg(v0) 36341 v0.AuxInt = 8 36342 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 36343 v1.AuxInt = i0 36344 v1.Aux = s 36345 v1.AddArg(p) 36346 v1.AddArg(idx) 36347 v1.AddArg(mem) 36348 v0.AddArg(v1) 36349 return true 36350 } 36351 // match: (ORQ sh:(SHLQconst [8] 
x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 36352 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36353 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 36354 for { 36355 _ = v.Args[1] 36356 sh := v.Args[0] 36357 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 36358 break 36359 } 36360 x0 := sh.Args[0] 36361 if x0.Op != OpAMD64MOVBloadidx1 { 36362 break 36363 } 36364 i0 := x0.AuxInt 36365 s := x0.Aux 36366 mem := x0.Args[2] 36367 idx := x0.Args[0] 36368 p := x0.Args[1] 36369 x1 := v.Args[1] 36370 if x1.Op != OpAMD64MOVBloadidx1 { 36371 break 36372 } 36373 i1 := x1.AuxInt 36374 if x1.Aux != s { 36375 break 36376 } 36377 _ = x1.Args[2] 36378 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36379 break 36380 } 36381 b = mergePoint(b, x0, x1) 36382 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 36383 v.reset(OpCopy) 36384 v.AddArg(v0) 36385 v0.AuxInt = 8 36386 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 36387 v1.AuxInt = i0 36388 v1.Aux = s 36389 v1.AddArg(p) 36390 v1.AddArg(idx) 36391 v1.AddArg(mem) 36392 v0.AddArg(v1) 36393 return true 36394 } 36395 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 36396 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36397 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 36398 for { 36399 _ = v.Args[1] 36400 sh := v.Args[0] 36401 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 36402 break 36403 } 36404 x0 := sh.Args[0] 36405 if x0.Op != OpAMD64MOVBloadidx1 { 36406 break 36407 } 36408 i0 := x0.AuxInt 36409 s := x0.Aux 36410 mem := x0.Args[2] 36411 p := x0.Args[0] 36412 idx := x0.Args[1] 36413 x1 := v.Args[1] 36414 if x1.Op != OpAMD64MOVBloadidx1 { 36415 break 36416 } 36417 i1 := x1.AuxInt 36418 if x1.Aux != s { 36419 break 36420 } 36421 _ = x1.Args[2] 36422 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36423 break 36424 } 36425 b = mergePoint(b, x0, x1) 36426 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 36427 v.reset(OpCopy) 36428 v.AddArg(v0) 36429 v0.AuxInt = 8 36430 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 36431 v1.AuxInt = i0 36432 v1.Aux = s 36433 v1.AddArg(p) 36434 v1.AddArg(idx) 36435 v1.AddArg(mem) 36436 v0.AddArg(v1) 36437 return true 36438 } 36439 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 36440 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 36441 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 36442 for { 36443 _ = v.Args[1] 36444 sh := v.Args[0] 36445 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { 36446 break 36447 } 36448 x0 := sh.Args[0] 36449 if x0.Op != OpAMD64MOVBloadidx1 { 36450 break 36451 } 36452 i0 := x0.AuxInt 36453 s := x0.Aux 36454 mem := x0.Args[2] 36455 idx := x0.Args[0] 36456 p := x0.Args[1] 36457 x1 := v.Args[1] 36458 
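// Neighboring rules enumerate every p/idx ordering because the two address
// operands of MOVBloadidx1 commute; each permutation still rewrites to the
// same canonical (MOVWloadidx1 [i0] {s} p idx mem) form.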
if x1.Op != OpAMD64MOVBloadidx1 { 36459 break 36460 } 36461 i1 := x1.AuxInt 36462 if x1.Aux != s { 36463 break 36464 } 36465 _ = x1.Args[2] 36466 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 36467 break 36468 } 36469 b = mergePoint(b, x0, x1) 36470 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 36471 v.reset(OpCopy) 36472 v.AddArg(v0) 36473 v0.AuxInt = 8 36474 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 36475 v1.AuxInt = i0 36476 v1.Aux = s 36477 v1.AddArg(p) 36478 v1.AddArg(idx) 36479 v1.AddArg(mem) 36480 v0.AddArg(v1) 36481 return true 36482 } 36483 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 36484 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36485 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 36486 for { 36487 _ = v.Args[1] 36488 r1 := v.Args[0] 36489 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 36490 break 36491 } 36492 x1 := r1.Args[0] 36493 if x1.Op != OpAMD64MOVWloadidx1 { 36494 break 36495 } 36496 i1 := x1.AuxInt 36497 s := x1.Aux 36498 mem := x1.Args[2] 36499 p := x1.Args[0] 36500 idx := x1.Args[1] 36501 sh := v.Args[1] 36502 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 36503 break 36504 } 36505 r0 := sh.Args[0] 36506 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36507 break 36508 } 36509 x0 := r0.Args[0] 36510 if x0.Op != OpAMD64MOVWloadidx1 { 36511 break 36512 } 36513 i0 := x0.AuxInt 36514 if x0.Aux != s { 36515 break 36516 } 36517 _ = x0.Args[2] 36518 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36519 break 36520 } 36521 b = mergePoint(b, x0, x1) 36522 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 36523 v.reset(OpCopy) 36524 v.AddArg(v0) 36525 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 36526 v1.AuxInt = i0 36527 v1.Aux = s 36528 v1.AddArg(p) 36529 v1.AddArg(idx) 36530 v1.AddArg(mem) 36531 v0.AddArg(v1) 36532 return true 36533 } 36534 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 36535 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36536 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 36537 for { 36538 _ = v.Args[1] 36539 r1 := v.Args[0] 36540 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 36541 break 36542 } 36543 x1 := r1.Args[0] 36544 if x1.Op != OpAMD64MOVWloadidx1 { 36545 break 36546 } 36547 i1 := x1.AuxInt 36548 s := x1.Aux 36549 mem := x1.Args[2] 36550 idx := x1.Args[0] 36551 p := x1.Args[1] 36552 sh := v.Args[1] 36553 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 36554 break 36555 } 36556 r0 := sh.Args[0] 36557 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36558 break 36559 } 36560 x0 := r0.Args[0] 36561 if x0.Op != OpAMD64MOVWloadidx1 { 36562 break 36563 
} 36564 i0 := x0.AuxInt 36565 if x0.Aux != s { 36566 break 36567 } 36568 _ = x0.Args[2] 36569 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36570 break 36571 } 36572 b = mergePoint(b, x0, x1) 36573 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 36574 v.reset(OpCopy) 36575 v.AddArg(v0) 36576 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 36577 v1.AuxInt = i0 36578 v1.Aux = s 36579 v1.AddArg(p) 36580 v1.AddArg(idx) 36581 v1.AddArg(mem) 36582 v0.AddArg(v1) 36583 return true 36584 } 36585 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 36586 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36587 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 36588 for { 36589 _ = v.Args[1] 36590 r1 := v.Args[0] 36591 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 36592 break 36593 } 36594 x1 := r1.Args[0] 36595 if x1.Op != OpAMD64MOVWloadidx1 { 36596 break 36597 } 36598 i1 := x1.AuxInt 36599 s := x1.Aux 36600 mem := x1.Args[2] 36601 p := x1.Args[0] 36602 idx := x1.Args[1] 36603 sh := v.Args[1] 36604 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 36605 break 36606 } 36607 r0 := sh.Args[0] 36608 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36609 break 36610 } 36611 x0 := r0.Args[0] 36612 if x0.Op != OpAMD64MOVWloadidx1 { 36613 break 36614 } 36615 i0 := x0.AuxInt 36616 if x0.Aux != s { 36617 break 36618 } 36619 _ = x0.Args[2] 36620 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36621 break 36622 } 36623 b = mergePoint(b, x0, x1) 36624 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 36625 v.reset(OpCopy) 36626 v.AddArg(v0) 36627 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 36628 v1.AuxInt = i0 36629 v1.Aux = s 36630 v1.AddArg(p) 36631 v1.AddArg(idx) 36632 v1.AddArg(mem) 36633 v0.AddArg(v1) 36634 return true 36635 } 36636 return false 36637 } 36638 func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool { 36639 b := v.Block 36640 typ := &b.Func.Config.Types 36641 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 36642 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36643 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 36644 for { 36645 _ = v.Args[1] 36646 r1 := v.Args[0] 36647 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 36648 break 36649 } 36650 x1 := r1.Args[0] 36651 if x1.Op != OpAMD64MOVWloadidx1 { 36652 break 36653 } 36654 i1 := x1.AuxInt 36655 s := x1.Aux 36656 mem := x1.Args[2] 36657 idx := x1.Args[0] 36658 p := x1.Args[1] 36659 sh := v.Args[1] 36660 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 36661 break 36662 } 36663 r0 := sh.Args[0] 36664 if r0.Op != 
OpAMD64ROLWconst || r0.AuxInt != 8 { 36665 break 36666 } 36667 x0 := r0.Args[0] 36668 if x0.Op != OpAMD64MOVWloadidx1 { 36669 break 36670 } 36671 i0 := x0.AuxInt 36672 if x0.Aux != s { 36673 break 36674 } 36675 _ = x0.Args[2] 36676 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36677 break 36678 } 36679 b = mergePoint(b, x0, x1) 36680 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 36681 v.reset(OpCopy) 36682 v.AddArg(v0) 36683 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 36684 v1.AuxInt = i0 36685 v1.Aux = s 36686 v1.AddArg(p) 36687 v1.AddArg(idx) 36688 v1.AddArg(mem) 36689 v0.AddArg(v1) 36690 return true 36691 } 36692 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 36693 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36694 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 36695 for { 36696 _ = v.Args[1] 36697 sh := v.Args[0] 36698 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 36699 break 36700 } 36701 r0 := sh.Args[0] 36702 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36703 break 36704 } 36705 x0 := r0.Args[0] 36706 if x0.Op != OpAMD64MOVWloadidx1 { 36707 break 36708 } 36709 i0 := x0.AuxInt 36710 s := x0.Aux 36711 mem := x0.Args[2] 36712 p := x0.Args[0] 36713 idx := x0.Args[1] 36714 r1 := v.Args[1] 36715 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 36716 break 36717 } 36718 x1 := r1.Args[0] 36719 if x1.Op != OpAMD64MOVWloadidx1 { 36720 break 36721 } 36722 i1 := x1.AuxInt 36723 if x1.Aux != s { 36724 break 36725 } 36726 _ = x1.Args[2] 36727 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36728 break 36729 } 36730 b = mergePoint(b, x0, x1) 36731 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 36732 v.reset(OpCopy) 36733 v.AddArg(v0) 36734 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 36735 v1.AuxInt = i0 36736 v1.Aux = s 36737 v1.AddArg(p) 36738 v1.AddArg(idx) 36739 v1.AddArg(mem) 36740 v0.AddArg(v1) 36741 return true 36742 } 36743 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 36744 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36745 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 36746 for { 36747 _ = v.Args[1] 36748 sh := v.Args[0] 36749 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 36750 break 36751 } 36752 r0 := sh.Args[0] 36753 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36754 break 36755 } 36756 x0 := r0.Args[0] 36757 if x0.Op != OpAMD64MOVWloadidx1 { 36758 break 36759 } 36760 i0 := x0.AuxInt 36761 s := x0.Aux 36762 mem := x0.Args[2] 36763 idx := x0.Args[0] 36764 p := x0.Args[1] 36765 r1 := v.Args[1] 36766 if r1.Op != 
OpAMD64ROLWconst || r1.AuxInt != 8 { 36767 break 36768 } 36769 x1 := r1.Args[0] 36770 if x1.Op != OpAMD64MOVWloadidx1 { 36771 break 36772 } 36773 i1 := x1.AuxInt 36774 if x1.Aux != s { 36775 break 36776 } 36777 _ = x1.Args[2] 36778 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36779 break 36780 } 36781 b = mergePoint(b, x0, x1) 36782 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 36783 v.reset(OpCopy) 36784 v.AddArg(v0) 36785 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 36786 v1.AuxInt = i0 36787 v1.Aux = s 36788 v1.AddArg(p) 36789 v1.AddArg(idx) 36790 v1.AddArg(mem) 36791 v0.AddArg(v1) 36792 return true 36793 } 36794 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 36795 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36796 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 36797 for { 36798 _ = v.Args[1] 36799 sh := v.Args[0] 36800 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 36801 break 36802 } 36803 r0 := sh.Args[0] 36804 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36805 break 36806 } 36807 x0 := r0.Args[0] 36808 if x0.Op != OpAMD64MOVWloadidx1 { 36809 break 36810 } 36811 i0 := x0.AuxInt 36812 s := x0.Aux 36813 mem := x0.Args[2] 36814 p := x0.Args[0] 36815 idx := x0.Args[1] 36816 r1 := v.Args[1] 36817 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 36818 break 36819 } 36820 x1 := r1.Args[0] 36821 if x1.Op != OpAMD64MOVWloadidx1 { 36822 break 36823 } 36824 i1 := x1.AuxInt 36825 if x1.Aux != s { 36826 break 36827 } 36828 _ = x1.Args[2] 36829 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36830 break 36831 } 36832 b = mergePoint(b, x0, x1) 36833 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 36834 v.reset(OpCopy) 36835 v.AddArg(v0) 36836 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 36837 v1.AuxInt = i0 36838 v1.Aux = s 36839 v1.AddArg(p) 36840 v1.AddArg(idx) 36841 v1.AddArg(mem) 36842 v0.AddArg(v1) 36843 return true 36844 } 36845 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 36846 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36847 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 36848 for { 36849 _ = v.Args[1] 36850 sh := v.Args[0] 36851 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { 36852 break 36853 } 36854 r0 := sh.Args[0] 36855 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 36856 break 36857 } 36858 x0 := r0.Args[0] 36859 if x0.Op != OpAMD64MOVWloadidx1 { 36860 break 36861 } 36862 i0 := x0.AuxInt 36863 s := x0.Aux 36864 mem := x0.Args[2] 36865 idx := x0.Args[0] 36866 p := x0.Args[1] 36867 r1 := v.Args[1] 36868 if r1.Op != 
OpAMD64ROLWconst || r1.AuxInt != 8 { 36869 break 36870 } 36871 x1 := r1.Args[0] 36872 if x1.Op != OpAMD64MOVWloadidx1 { 36873 break 36874 } 36875 i1 := x1.AuxInt 36876 if x1.Aux != s { 36877 break 36878 } 36879 _ = x1.Args[2] 36880 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36881 break 36882 } 36883 b = mergePoint(b, x0, x1) 36884 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 36885 v.reset(OpCopy) 36886 v.AddArg(v0) 36887 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 36888 v1.AuxInt = i0 36889 v1.Aux = s 36890 v1.AddArg(p) 36891 v1.AddArg(idx) 36892 v1.AddArg(mem) 36893 v0.AddArg(v1) 36894 return true 36895 } 36896 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 36897 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36898 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 36899 for { 36900 _ = v.Args[1] 36901 r1 := v.Args[0] 36902 if r1.Op != OpAMD64BSWAPL { 36903 break 36904 } 36905 x1 := r1.Args[0] 36906 if x1.Op != OpAMD64MOVLloadidx1 { 36907 break 36908 } 36909 i1 := x1.AuxInt 36910 s := x1.Aux 36911 mem := x1.Args[2] 36912 p := x1.Args[0] 36913 idx := x1.Args[1] 36914 sh := v.Args[1] 36915 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { 36916 break 36917 } 36918 r0 := sh.Args[0] 36919 if r0.Op != OpAMD64BSWAPL { 36920 break 36921 } 36922 x0 := r0.Args[0] 36923 if x0.Op != OpAMD64MOVLloadidx1 { 36924 break 36925 } 36926 i0 := x0.AuxInt 36927 if x0.Aux != s { 36928 break 36929 } 36930 _ = x0.Args[2] 36931 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36932 break 36933 } 36934 b = mergePoint(b, x0, x1) 36935 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 36936 v.reset(OpCopy) 36937 v.AddArg(v0) 36938 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 36939 v1.AuxInt = i0 36940 v1.Aux = s 36941 v1.AddArg(p) 36942 v1.AddArg(idx) 36943 v1.AddArg(mem) 36944 v0.AddArg(v1) 36945 return true 36946 } 36947 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 36948 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 36949 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 36950 for { 36951 _ = v.Args[1] 36952 r1 := v.Args[0] 36953 if r1.Op != OpAMD64BSWAPL { 36954 break 36955 } 36956 x1 := r1.Args[0] 36957 if x1.Op != OpAMD64MOVLloadidx1 { 36958 break 36959 } 36960 i1 := x1.AuxInt 36961 s := x1.Aux 36962 mem := x1.Args[2] 36963 idx := x1.Args[0] 36964 p := x1.Args[1] 36965 sh := v.Args[1] 36966 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { 36967 break 36968 } 36969 r0 := sh.Args[0] 36970 if r0.Op != OpAMD64BSWAPL { 36971 break 36972 } 36973 x0 := r0.Args[0] 36974 if x0.Op != OpAMD64MOVLloadidx1 { 
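// The operand under BSWAPL must itself be a 32-bit indexed load; any other
// shape cannot be folded into a single BSWAPQ.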
36975 break 36976 } 36977 i0 := x0.AuxInt 36978 if x0.Aux != s { 36979 break 36980 } 36981 _ = x0.Args[2] 36982 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 36983 break 36984 } 36985 b = mergePoint(b, x0, x1) 36986 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 36987 v.reset(OpCopy) 36988 v.AddArg(v0) 36989 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 36990 v1.AuxInt = i0 36991 v1.Aux = s 36992 v1.AddArg(p) 36993 v1.AddArg(idx) 36994 v1.AddArg(mem) 36995 v0.AddArg(v1) 36996 return true 36997 } 36998 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 36999 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 37000 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 37001 for { 37002 _ = v.Args[1] 37003 r1 := v.Args[0] 37004 if r1.Op != OpAMD64BSWAPL { 37005 break 37006 } 37007 x1 := r1.Args[0] 37008 if x1.Op != OpAMD64MOVLloadidx1 { 37009 break 37010 } 37011 i1 := x1.AuxInt 37012 s := x1.Aux 37013 mem := x1.Args[2] 37014 p := x1.Args[0] 37015 idx := x1.Args[1] 37016 sh := v.Args[1] 37017 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { 37018 break 37019 } 37020 r0 := sh.Args[0] 37021 if r0.Op != OpAMD64BSWAPL { 37022 break 37023 } 37024 x0 := r0.Args[0] 37025 if x0.Op != OpAMD64MOVLloadidx1 { 37026 break 37027 } 37028 i0 := x0.AuxInt 37029 if x0.Aux != s { 37030 break 37031 } 37032 _ = x0.Args[2] 37033 if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 37034 break 37035 } 37036 b = mergePoint(b, x0, x1) 37037 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 37038 v.reset(OpCopy) 37039 v.AddArg(v0) 37040 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 37041 v1.AuxInt = i0 37042 v1.Aux = s 37043 v1.AddArg(p) 37044 v1.AddArg(idx) 37045 v1.AddArg(mem) 37046 v0.AddArg(v1) 37047 return true 37048 } 37049 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 37050 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 37051 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 37052 for { 37053 _ = v.Args[1] 37054 r1 := v.Args[0] 37055 if r1.Op != OpAMD64BSWAPL { 37056 break 37057 } 37058 x1 := r1.Args[0] 37059 if x1.Op != OpAMD64MOVLloadidx1 { 37060 break 37061 } 37062 i1 := x1.AuxInt 37063 s := x1.Aux 37064 mem := x1.Args[2] 37065 idx := x1.Args[0] 37066 p := x1.Args[1] 37067 sh := v.Args[1] 37068 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { 37069 break 37070 } 37071 r0 := sh.Args[0] 37072 if r0.Op != OpAMD64BSWAPL { 37073 break 37074 } 37075 x0 := r0.Args[0] 37076 if x0.Op != OpAMD64MOVLloadidx1 { 37077 break 37078 } 37079 i0 := x0.AuxInt 37080 if x0.Aux != s { 37081 break 37082 } 37083 _ = x0.Args[2] 37084 if idx != 
x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 37085 break 37086 } 37087 b = mergePoint(b, x0, x1) 37088 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 37089 v.reset(OpCopy) 37090 v.AddArg(v0) 37091 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 37092 v1.AuxInt = i0 37093 v1.Aux = s 37094 v1.AddArg(p) 37095 v1.AddArg(idx) 37096 v1.AddArg(mem) 37097 v0.AddArg(v1) 37098 return true 37099 } 37100 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 37101 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 37102 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 37103 for { 37104 _ = v.Args[1] 37105 sh := v.Args[0] 37106 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { 37107 break 37108 } 37109 r0 := sh.Args[0] 37110 if r0.Op != OpAMD64BSWAPL { 37111 break 37112 } 37113 x0 := r0.Args[0] 37114 if x0.Op != OpAMD64MOVLloadidx1 { 37115 break 37116 } 37117 i0 := x0.AuxInt 37118 s := x0.Aux 37119 mem := x0.Args[2] 37120 p := x0.Args[0] 37121 idx := x0.Args[1] 37122 r1 := v.Args[1] 37123 if r1.Op != OpAMD64BSWAPL { 37124 break 37125 } 37126 x1 := r1.Args[0] 37127 if x1.Op != OpAMD64MOVLloadidx1 { 37128 break 37129 } 37130 i1 := x1.AuxInt 37131 if x1.Aux != s { 37132 break 37133 } 37134 _ = x1.Args[2] 37135 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 37136 break 37137 } 37138 b = mergePoint(b, x0, x1) 37139 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 37140 v.reset(OpCopy) 37141 v.AddArg(v0) 37142 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 37143 v1.AuxInt = i0 37144 v1.Aux = s 37145 v1.AddArg(p) 37146 v1.AddArg(idx) 37147 v1.AddArg(mem) 37148 v0.AddArg(v1) 37149 return true 37150 } 37151 return false 37152 } 37153 func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool { 37154 b := v.Block 37155 typ := &b.Func.Config.Types 37156 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 37157 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 37158 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 37159 for { 37160 _ = v.Args[1] 37161 sh := v.Args[0] 37162 if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { 37163 break 37164 } 37165 r0 := sh.Args[0] 37166 if r0.Op != OpAMD64BSWAPL { 37167 break 37168 } 37169 x0 := r0.Args[0] 37170 if x0.Op != OpAMD64MOVLloadidx1 { 37171 break 37172 } 37173 i0 := x0.AuxInt 37174 s := x0.Aux 37175 mem := x0.Args[2] 37176 idx := x0.Args[0] 37177 p := x0.Args[1] 37178 r1 := v.Args[1] 37179 if r1.Op != OpAMD64BSWAPL { 37180 break 37181 } 37182 x1 := r1.Args[0] 37183 if x1.Op != OpAMD64MOVLloadidx1 { 37184 break 37185 } 37186 i1 := x1.AuxInt 37187 if x1.Aux != s { 37188 break 37189 } 37190 _ = x1.Args[2] 
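// Once the argument and side-condition checks below succeed, the two
// byte-reversed 32-bit halves collapse into one byte-reversed 64-bit load.
// A minimal, hypothetical sketch of source code that can lower to this
// pattern (assuming encoding/binary is imported; names are illustrative):
//
//	func beUint64(b []byte) uint64 {
//		hi := uint64(binary.BigEndian.Uint32(b[0:4])) << 32
//		lo := uint64(binary.BigEndian.Uint32(b[4:8]))
//		return hi | lo // a candidate for BSWAPQ of a single 64-bit load
//	}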
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
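// rewriteValueAMD64_OpAMD64ORQ_130 above merges pairs of byte loads into a
// single word load. The cond arithmetic: i1 == i0+1 requires the two bytes to
// be adjacent in memory, j1 == j0-8 requires their shift amounts to differ by
// one byte, and j1 % 16 == 0 keeps the merged 16-bit chunk aligned within the
// 64-bit result. For example, with j0 = 56 and j1 = 48, the bytes at [i0] and
// [i0+1] occupy bits 48..63 and are replaced by one (MOVWloadidx1 [i0])
// shifted left by j1 = 48, with (ROLWconst [8]) swapping the two bytes into
// big-endian order. Paraphrasing the rule from the match/cond/result comments
// (a sketch of the gen/AMD64.rules source, not a verbatim quote):
//
//	(ORQ (SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
//	     (ORQ (SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
//	  && i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && ...
//	  -> (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)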
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
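// Common machinery in the rule bodies above and below: the Uses == 1 checks
// ensure each matched intermediate value has no other consumer, so it is safe
// to delete; clobber(v) invalidates such a value and always returns true,
// which is why it can sit inside the boolean cond; and mergePoint(b, x0, x1,
// ...) picks the block in which the merged load can be placed so that it
// dominates all uses, returning nil (rule fails) when no such block exists.
// Every body follows the same dispatch idiom, sketched here (a schematic
// outline, not generated code):
//
//	for {
//		// deconstruct v's operand tree, breaking on the first mismatch
//		if !(cond) {
//			break
//		}
//		// build the replacement value graph and return true
//	}
//	return false // no rule in this chunk fired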
b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38422 v.reset(OpCopy) 38423 v.AddArg(v0) 38424 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38425 v1.AuxInt = j1 38426 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 38427 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38428 v3.AuxInt = i0 38429 v3.Aux = s 38430 v3.AddArg(p) 38431 v3.AddArg(idx) 38432 v3.AddArg(mem) 38433 v2.AddArg(v3) 38434 v1.AddArg(v2) 38435 v0.AddArg(v1) 38436 v0.AddArg(y) 38437 return true 38438 } 38439 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 38440 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 38441 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 38442 for { 38443 _ = v.Args[1] 38444 s0 := v.Args[0] 38445 if s0.Op != OpAMD64SHLQconst { 38446 break 38447 } 38448 j0 := s0.AuxInt 38449 r0 := s0.Args[0] 38450 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 38451 break 38452 } 38453 x0 := r0.Args[0] 38454 if x0.Op != OpAMD64MOVWloadidx1 { 38455 break 38456 } 38457 i0 := x0.AuxInt 38458 s := x0.Aux 38459 mem := x0.Args[2] 38460 p := x0.Args[0] 38461 idx := x0.Args[1] 38462 or := v.Args[1] 38463 if or.Op != OpAMD64ORQ { 38464 break 38465 } 38466 y := or.Args[1] 38467 s1 := or.Args[0] 38468 if s1.Op != OpAMD64SHLQconst { 38469 break 38470 } 38471 j1 := s1.AuxInt 38472 r1 := s1.Args[0] 38473 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 38474 break 38475 } 38476 x1 := r1.Args[0] 38477 if x1.Op != OpAMD64MOVWloadidx1 { 38478 break 38479 } 38480 i1 := x1.AuxInt 38481 if x1.Aux != s { 38482 break 38483 } 38484 _ = x1.Args[2] 38485 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 38486 break 38487 } 38488 b = mergePoint(b, x0, x1, y) 38489 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38490 v.reset(OpCopy) 38491 v.AddArg(v0) 38492 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38493 v1.AuxInt = j1 38494 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 38495 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38496 v3.AuxInt = i0 38497 v3.Aux = s 38498 v3.AddArg(p) 38499 v3.AddArg(idx) 38500 v3.AddArg(mem) 38501 v2.AddArg(v3) 38502 v1.AddArg(v2) 38503 v0.AddArg(v1) 38504 v0.AddArg(y) 38505 return true 38506 } 38507 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 38508 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 38509 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 
38510 for { 38511 _ = v.Args[1] 38512 s0 := v.Args[0] 38513 if s0.Op != OpAMD64SHLQconst { 38514 break 38515 } 38516 j0 := s0.AuxInt 38517 r0 := s0.Args[0] 38518 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 38519 break 38520 } 38521 x0 := r0.Args[0] 38522 if x0.Op != OpAMD64MOVWloadidx1 { 38523 break 38524 } 38525 i0 := x0.AuxInt 38526 s := x0.Aux 38527 mem := x0.Args[2] 38528 idx := x0.Args[0] 38529 p := x0.Args[1] 38530 or := v.Args[1] 38531 if or.Op != OpAMD64ORQ { 38532 break 38533 } 38534 y := or.Args[1] 38535 s1 := or.Args[0] 38536 if s1.Op != OpAMD64SHLQconst { 38537 break 38538 } 38539 j1 := s1.AuxInt 38540 r1 := s1.Args[0] 38541 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 38542 break 38543 } 38544 x1 := r1.Args[0] 38545 if x1.Op != OpAMD64MOVWloadidx1 { 38546 break 38547 } 38548 i1 := x1.AuxInt 38549 if x1.Aux != s { 38550 break 38551 } 38552 _ = x1.Args[2] 38553 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 38554 break 38555 } 38556 b = mergePoint(b, x0, x1, y) 38557 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38558 v.reset(OpCopy) 38559 v.AddArg(v0) 38560 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38561 v1.AuxInt = j1 38562 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 38563 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38564 v3.AuxInt = i0 38565 v3.Aux = s 38566 v3.AddArg(p) 38567 v3.AddArg(idx) 38568 v3.AddArg(mem) 38569 v2.AddArg(v3) 38570 v1.AddArg(v2) 38571 v0.AddArg(v1) 38572 v0.AddArg(y) 38573 return true 38574 } 38575 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 38576 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 38577 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 38578 for { 38579 _ = v.Args[1] 38580 s0 := v.Args[0] 38581 if s0.Op != OpAMD64SHLQconst { 38582 break 38583 } 38584 j0 := s0.AuxInt 38585 r0 := s0.Args[0] 38586 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 38587 break 38588 } 38589 x0 := r0.Args[0] 38590 if x0.Op != OpAMD64MOVWloadidx1 { 38591 break 38592 } 38593 i0 := x0.AuxInt 38594 s := x0.Aux 38595 mem := x0.Args[2] 38596 p := x0.Args[0] 38597 idx := x0.Args[1] 38598 or := v.Args[1] 38599 if or.Op != OpAMD64ORQ { 38600 break 38601 } 38602 _ = or.Args[1] 38603 y := or.Args[0] 38604 s1 := or.Args[1] 38605 if s1.Op != OpAMD64SHLQconst { 38606 break 38607 } 38608 j1 := s1.AuxInt 38609 r1 := s1.Args[0] 38610 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 38611 break 38612 } 38613 x1 := r1.Args[0] 38614 if x1.Op != OpAMD64MOVWloadidx1 { 38615 break 38616 } 38617 i1 := x1.AuxInt 38618 if x1.Aux != s { 38619 break 38620 } 38621 _ = x1.Args[2] 38622 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 
&& or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 38623 break 38624 } 38625 b = mergePoint(b, x0, x1, y) 38626 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38627 v.reset(OpCopy) 38628 v.AddArg(v0) 38629 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38630 v1.AuxInt = j1 38631 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 38632 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38633 v3.AuxInt = i0 38634 v3.Aux = s 38635 v3.AddArg(p) 38636 v3.AddArg(idx) 38637 v3.AddArg(mem) 38638 v2.AddArg(v3) 38639 v1.AddArg(v2) 38640 v0.AddArg(v1) 38641 v0.AddArg(y) 38642 return true 38643 } 38644 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 38645 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 38646 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 38647 for { 38648 _ = v.Args[1] 38649 s0 := v.Args[0] 38650 if s0.Op != OpAMD64SHLQconst { 38651 break 38652 } 38653 j0 := s0.AuxInt 38654 r0 := s0.Args[0] 38655 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 38656 break 38657 } 38658 x0 := r0.Args[0] 38659 if x0.Op != OpAMD64MOVWloadidx1 { 38660 break 38661 } 38662 i0 := x0.AuxInt 38663 s := x0.Aux 38664 mem := x0.Args[2] 38665 idx := x0.Args[0] 38666 p := x0.Args[1] 38667 or := v.Args[1] 38668 if or.Op != OpAMD64ORQ { 38669 break 38670 } 38671 _ = or.Args[1] 38672 y := or.Args[0] 38673 s1 := or.Args[1] 38674 if s1.Op != OpAMD64SHLQconst { 38675 break 38676 } 38677 j1 := s1.AuxInt 38678 r1 := s1.Args[0] 38679 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 38680 break 38681 } 38682 x1 := r1.Args[0] 38683 if x1.Op != OpAMD64MOVWloadidx1 { 38684 break 38685 } 38686 i1 := x1.AuxInt 38687 if x1.Aux != s { 38688 break 38689 } 38690 _ = x1.Args[2] 38691 if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 38692 break 38693 } 38694 b = mergePoint(b, x0, x1, y) 38695 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38696 v.reset(OpCopy) 38697 v.AddArg(v0) 38698 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38699 v1.AuxInt = j1 38700 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 38701 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38702 v3.AuxInt = i0 38703 v3.Aux = s 38704 v3.AddArg(p) 38705 v3.AddArg(idx) 38706 v3.AddArg(mem) 38707 v2.AddArg(v3) 38708 v1.AddArg(v2) 38709 v0.AddArg(v1) 38710 v0.AddArg(y) 38711 return true 38712 } 38713 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 38714 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil 
&& clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 38715 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 38716 for { 38717 _ = v.Args[1] 38718 s0 := v.Args[0] 38719 if s0.Op != OpAMD64SHLQconst { 38720 break 38721 } 38722 j0 := s0.AuxInt 38723 r0 := s0.Args[0] 38724 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 38725 break 38726 } 38727 x0 := r0.Args[0] 38728 if x0.Op != OpAMD64MOVWloadidx1 { 38729 break 38730 } 38731 i0 := x0.AuxInt 38732 s := x0.Aux 38733 mem := x0.Args[2] 38734 p := x0.Args[0] 38735 idx := x0.Args[1] 38736 or := v.Args[1] 38737 if or.Op != OpAMD64ORQ { 38738 break 38739 } 38740 _ = or.Args[1] 38741 y := or.Args[0] 38742 s1 := or.Args[1] 38743 if s1.Op != OpAMD64SHLQconst { 38744 break 38745 } 38746 j1 := s1.AuxInt 38747 r1 := s1.Args[0] 38748 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 38749 break 38750 } 38751 x1 := r1.Args[0] 38752 if x1.Op != OpAMD64MOVWloadidx1 { 38753 break 38754 } 38755 i1 := x1.AuxInt 38756 if x1.Aux != s { 38757 break 38758 } 38759 _ = x1.Args[2] 38760 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 38761 break 38762 } 38763 b = mergePoint(b, x0, x1, y) 38764 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38765 v.reset(OpCopy) 38766 v.AddArg(v0) 38767 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38768 v1.AuxInt = j1 38769 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 38770 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38771 v3.AuxInt = i0 38772 v3.Aux = s 38773 v3.AddArg(p) 38774 v3.AddArg(idx) 38775 v3.AddArg(mem) 38776 v2.AddArg(v3) 38777 v1.AddArg(v2) 38778 v0.AddArg(v1) 38779 v0.AddArg(y) 38780 return true 38781 } 38782 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 38783 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 38784 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 38785 for { 38786 _ = v.Args[1] 38787 s0 := v.Args[0] 38788 if s0.Op != OpAMD64SHLQconst { 38789 break 38790 } 38791 j0 := s0.AuxInt 38792 r0 := s0.Args[0] 38793 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 38794 break 38795 } 38796 x0 := r0.Args[0] 38797 if x0.Op != OpAMD64MOVWloadidx1 { 38798 break 38799 } 38800 i0 := x0.AuxInt 38801 s := x0.Aux 38802 mem := x0.Args[2] 38803 idx := x0.Args[0] 38804 p := x0.Args[1] 38805 or := v.Args[1] 38806 if or.Op != OpAMD64ORQ { 38807 break 38808 } 38809 _ = or.Args[1] 38810 y := or.Args[0] 38811 s1 := or.Args[1] 38812 if s1.Op != OpAMD64SHLQconst { 38813 break 38814 } 38815 j1 := s1.AuxInt 38816 r1 := s1.Args[0] 38817 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 38818 break 38819 } 38820 x1 := r1.Args[0] 38821 if x1.Op != OpAMD64MOVWloadidx1 { 38822 break 38823 } 38824 i1 := x1.AuxInt 38825 if 
x1.Aux != s { 38826 break 38827 } 38828 _ = x1.Args[2] 38829 if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 38830 break 38831 } 38832 b = mergePoint(b, x0, x1, y) 38833 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38834 v.reset(OpCopy) 38835 v.AddArg(v0) 38836 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38837 v1.AuxInt = j1 38838 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 38839 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38840 v3.AuxInt = i0 38841 v3.Aux = s 38842 v3.AddArg(p) 38843 v3.AddArg(idx) 38844 v3.AddArg(mem) 38845 v2.AddArg(v3) 38846 v1.AddArg(v2) 38847 v0.AddArg(v1) 38848 v0.AddArg(y) 38849 return true 38850 } 38851 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 38852 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 38853 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 38854 for { 38855 _ = v.Args[1] 38856 or := v.Args[0] 38857 if or.Op != OpAMD64ORQ { 38858 break 38859 } 38860 y := or.Args[1] 38861 s1 := or.Args[0] 38862 if s1.Op != OpAMD64SHLQconst { 38863 break 38864 } 38865 j1 := s1.AuxInt 38866 r1 := s1.Args[0] 38867 if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { 38868 break 38869 } 38870 x1 := r1.Args[0] 38871 if x1.Op != OpAMD64MOVWloadidx1 { 38872 break 38873 } 38874 i1 := x1.AuxInt 38875 s := x1.Aux 38876 mem := x1.Args[2] 38877 p := x1.Args[0] 38878 idx := x1.Args[1] 38879 s0 := v.Args[1] 38880 if s0.Op != OpAMD64SHLQconst { 38881 break 38882 } 38883 j0 := s0.AuxInt 38884 r0 := s0.Args[0] 38885 if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { 38886 break 38887 } 38888 x0 := r0.Args[0] 38889 if x0.Op != OpAMD64MOVWloadidx1 { 38890 break 38891 } 38892 i0 := x0.AuxInt 38893 if x0.Aux != s { 38894 break 38895 } 38896 _ = x0.Args[2] 38897 if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 38898 break 38899 } 38900 b = mergePoint(b, x0, x1, y) 38901 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 38902 v.reset(OpCopy) 38903 v.AddArg(v0) 38904 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 38905 v1.AuxInt = j1 38906 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 38907 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 38908 v3.AuxInt = i0 38909 v3.Aux = s 38910 v3.AddArg(p) 38911 v3.AddArg(idx) 38912 v3.AddArg(mem) 38913 v2.AddArg(v3) 38914 v1.AddArg(v2) 38915 v0.AddArg(v1) 38916 v0.AddArg(y) 38917 return true 38918 } 38919 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] 
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] || !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
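	// The next rule is the commuted form of the rule above: ORQ is
	// commutative, so the mergeable MOVQload can appear as either operand.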
	// match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
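// The first rule below prefers a bit-set instruction over an OR when the
// constant has a single set bit at position 7 or higher: for example,
// (ORQconst [256] x) rewrites to (BTSQconst [8] x), because 256 == 1<<8 and
// 256 >= 128.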
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
	// match: (ORQconst [c] x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTSQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [c] (ORQconst [d] x))
	// result: (ORQconst [c | d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c | d
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [c] (BTSQconst [d] x))
	// result: (ORQconst [c | 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c | 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [0] x)
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// result: (MOVQconst [-1])
	for {
		if v.AuxInt != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconstmodify_0(v *Value) bool {
	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ORQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: ( ORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQmodify_0(v *Value) bool {
	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
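// The rotate rules below use two identities: rotating by a negated amount
// flips the direction (rotl(x, -c) == rotr(x, c)), and a constant rotate
// amount is reduced modulo the operand width, hence the &7, &15, &31 and &63
// masks. math/bits documents the same property; for example
// bits.RotateLeft8(x, -3) == bits.RotateLeft8(x, 5) for every x.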
func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
	// match: (ROLB x (NEGQ y))
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// result: (ROLBconst [c&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (c + d) & 7
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst x [0])
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
	// match: (ROLL x (NEGQ y))
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// result: (ROLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (c + d) & 31
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst x [0])
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
	// match: (ROLQ x (NEGQ y))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// result: (ROLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (c + d) & 63
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst x [0])
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
	// match: (ROLW x (NEGQ y))
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// result: (ROLWconst [c&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (c + d) & 15
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst x [0])
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
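// A rotate right by a constant is canonicalized to a rotate left by the
// complementary amount: (RORB x (MOVQconst [3])) becomes (ROLBconst [5] x),
// since (-3)&7 == 5 and rotating an 8-bit value right by 3 equals rotating it
// left by 5.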
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
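// For shifts narrower than the register, the shift amount saturates at
// width-1 instead of wrapping: an arithmetic right shift of an 8-bit value by
// 7 or more always produces the sign mask, so (SARB x (MOVQconst [12]))
// becomes (SARBconst [7] x) via min(12&31, 7).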
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int8(d)) >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
	b := v.Block
	// match: (SARL x (MOVQconst [c]))
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int32(d)) >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
	b := v.Block
	// match: (SARQ x (MOVQconst [c]))
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
	// match: (SARQconst x [0])
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
	// match: (SARWconst x [0])
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int16(d)) >> uint64(c)
		return true
	}
	return false
}
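// SBBxcarrymask computes x-x-carry, i.e. 0 when the carry flag is clear and
// -1 (all ones) when it is set, so with a known flags input it folds to a
// constant mask below (the unsigned-less-than flag states set carry, the
// unsigned-greater-than states clear it).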
func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
	// match: (SBBLcarrymask (FlagEQ))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQ_0(v *Value) bool {
	// match: (SBBQ x (MOVQconst [c]) borrow)
	// cond: is32Bit(c)
	// result: (SBBQconst x [c] borrow)
	for {
		borrow := v.Args[2]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SBBQconst)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(borrow)
		return true
	}
	// match: (SBBQ x y (FlagEQ))
	// result: (SUBQborrow x y)
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQborrow)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
	// match: (SBBQcarrymask (FlagEQ))
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQconst_0(v *Value) bool {
	// match: (SBBQconst x [c] (FlagEQ))
	// result: (SUBQconstborrow x [c])
	for {
		c := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
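// InvertFlags marks a comparison whose operands were swapped, so each SETcc
// rule below substitutes the mirrored condition (above becomes below, and so
// on), not the negated one.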
func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
	// match: (SETA (InvertFlags x))
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
	// match: (SETAE (InvertFlags x))
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETAEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
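// Note that the FlagEQ outcomes differ between the strict and non-strict
// forms: SETAstore writes 0 on a known-equal comparison (strictly "above" is
// false on equality), while SETAEstore above writes 1. SETB/SETBE below
// mirror the same split.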
func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETAstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
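// SETB and SETBE are the unsigned duals of SETA and SETAE: the *_ULT flag
// states now produce 1 and the *_UGT states produce 0.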
func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
	// match: (SETB (InvertFlags x))
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
	// match: (SETBE (InvertFlags x))
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETAstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
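// The SETEQ rules below turn single-bit tests into BT instructions.
// (TESTL (SHLL (MOVLconst [1]) x) y) computes y & (1<<x) and sets ZF, while
// (BTL x y) copies bit x of y into the carry flag instead; ZF==1 for the
// TEST is exactly CF==0 for the BT, so SETEQ becomes SETAE. For constant
// masks that are powers of two, log2/log2uint32 recover the bit index:
//
//	y&(1<<k) == 0  <=>  SETAE (BTQconst [k] y)
//
// The CMPLconst/CMPQconst rules use that an AND-with-1 result can only be
// 0 or 1, so comparing it against 1 for equality is the same as comparing
// it against 0 for inequality.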
func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
	b := v.Block
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		y := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		x := v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst || v_0_1_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		y := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		x := v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst || v_0_1_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(c)
	// result: (SETAE (BTLconst [log2uint32(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		if !(isUint64PowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isUint64PowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETNE (CMPLconst [0] s))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64CMPLconst || v_0.AuxInt != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETNE (CMPQconst [0] s))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64CMPQconst || v_0.AuxInt != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	return false
}
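// This batch recognizes bit-isolation idioms. A pair of opposing 63-bit (or
// 31-bit) shifts leaves a single significant bit, and TESTing such a value
// against itself (the z1==z2 condition) only asks whether that bit is zero,
// which is a single BT: z1:(SHLQconst [63] (SHRQconst [63] x)) isolates bit
// 63 of x, so (TESTQ z1 z1) reduces to (BTQconst [63] x), while the
// shift-left-then-right form isolates bit 0.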
func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
	b := v.Block
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
	// cond: z1==z2
	// result: (SETAE (BTQconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
	// cond: z1==z2
	// result: (SETAE (BTQconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
	// cond: z1==z2
	// result: (SETAE (BTLconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] x)))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
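// Equality is symmetric under operand swap, so (SETEQ (InvertFlags x)) keeps
// its condition instead of taking a dual; with statically known flags, SETEQ
// folds to 1 only in the FlagEQ case.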
func rewriteValueAMD64_OpAMD64SETEQ_20(v *Value) bool {
	b := v.Block
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHRLconst [31] x)))
	// cond: z1==z2
	// result: (SETAE (BTLconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
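// SETEQstore repeats the SETEQ bit-test and flag-folding rules in store-fused
// form, rewriting to SETAEstore/SETNEstore or, for statically known flags, a
// MOVBstore of the constant, while carrying the [off] {sym} ptr and mem
// operands along unchanged.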
func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool {
	b := v.Block
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		x := v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		x := v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst || v_1_1_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		x := v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst || v_1_1_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		if !(isUint32PowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		if !(isUint64PowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		x := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		if !(isUint64PowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		if !(isUint64PowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPLconst || v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPQconst || v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool {
	b := v.Block
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETEQstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
43678 v0.AuxInt = 0 43679 v.AddArg(v0) 43680 v.AddArg(mem) 43681 return true 43682 } 43683 return false 43684 } 43685 func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { 43686 // match: (SETG (InvertFlags x)) 43687 // result: (SETL x) 43688 for { 43689 v_0 := v.Args[0] 43690 if v_0.Op != OpAMD64InvertFlags { 43691 break 43692 } 43693 x := v_0.Args[0] 43694 v.reset(OpAMD64SETL) 43695 v.AddArg(x) 43696 return true 43697 } 43698 // match: (SETG (FlagEQ)) 43699 // result: (MOVLconst [0]) 43700 for { 43701 v_0 := v.Args[0] 43702 if v_0.Op != OpAMD64FlagEQ { 43703 break 43704 } 43705 v.reset(OpAMD64MOVLconst) 43706 v.AuxInt = 0 43707 return true 43708 } 43709 // match: (SETG (FlagLT_ULT)) 43710 // result: (MOVLconst [0]) 43711 for { 43712 v_0 := v.Args[0] 43713 if v_0.Op != OpAMD64FlagLT_ULT { 43714 break 43715 } 43716 v.reset(OpAMD64MOVLconst) 43717 v.AuxInt = 0 43718 return true 43719 } 43720 // match: (SETG (FlagLT_UGT)) 43721 // result: (MOVLconst [0]) 43722 for { 43723 v_0 := v.Args[0] 43724 if v_0.Op != OpAMD64FlagLT_UGT { 43725 break 43726 } 43727 v.reset(OpAMD64MOVLconst) 43728 v.AuxInt = 0 43729 return true 43730 } 43731 // match: (SETG (FlagGT_ULT)) 43732 // result: (MOVLconst [1]) 43733 for { 43734 v_0 := v.Args[0] 43735 if v_0.Op != OpAMD64FlagGT_ULT { 43736 break 43737 } 43738 v.reset(OpAMD64MOVLconst) 43739 v.AuxInt = 1 43740 return true 43741 } 43742 // match: (SETG (FlagGT_UGT)) 43743 // result: (MOVLconst [1]) 43744 for { 43745 v_0 := v.Args[0] 43746 if v_0.Op != OpAMD64FlagGT_UGT { 43747 break 43748 } 43749 v.reset(OpAMD64MOVLconst) 43750 v.AuxInt = 1 43751 return true 43752 } 43753 return false 43754 } 43755 func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { 43756 // match: (SETGE (InvertFlags x)) 43757 // result: (SETLE x) 43758 for { 43759 v_0 := v.Args[0] 43760 if v_0.Op != OpAMD64InvertFlags { 43761 break 43762 } 43763 x := v_0.Args[0] 43764 v.reset(OpAMD64SETLE) 43765 v.AddArg(x) 43766 return true 43767 } 43768 // match: (SETGE (FlagEQ)) 43769 // result: (MOVLconst [1]) 43770 for { 43771 v_0 := v.Args[0] 43772 if v_0.Op != OpAMD64FlagEQ { 43773 break 43774 } 43775 v.reset(OpAMD64MOVLconst) 43776 v.AuxInt = 1 43777 return true 43778 } 43779 // match: (SETGE (FlagLT_ULT)) 43780 // result: (MOVLconst [0]) 43781 for { 43782 v_0 := v.Args[0] 43783 if v_0.Op != OpAMD64FlagLT_ULT { 43784 break 43785 } 43786 v.reset(OpAMD64MOVLconst) 43787 v.AuxInt = 0 43788 return true 43789 } 43790 // match: (SETGE (FlagLT_UGT)) 43791 // result: (MOVLconst [0]) 43792 for { 43793 v_0 := v.Args[0] 43794 if v_0.Op != OpAMD64FlagLT_UGT { 43795 break 43796 } 43797 v.reset(OpAMD64MOVLconst) 43798 v.AuxInt = 0 43799 return true 43800 } 43801 // match: (SETGE (FlagGT_ULT)) 43802 // result: (MOVLconst [1]) 43803 for { 43804 v_0 := v.Args[0] 43805 if v_0.Op != OpAMD64FlagGT_ULT { 43806 break 43807 } 43808 v.reset(OpAMD64MOVLconst) 43809 v.AuxInt = 1 43810 return true 43811 } 43812 // match: (SETGE (FlagGT_UGT)) 43813 // result: (MOVLconst [1]) 43814 for { 43815 v_0 := v.Args[0] 43816 if v_0.Op != OpAMD64FlagGT_UGT { 43817 break 43818 } 43819 v.reset(OpAMD64MOVLconst) 43820 v.AuxInt = 1 43821 return true 43822 } 43823 return false 43824 } 43825 func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool { 43826 b := v.Block 43827 typ := &b.Func.Config.Types 43828 // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem) 43829 // result: (SETLEstore [off] {sym} ptr x mem) 43830 for { 43831 off := v.AuxInt 43832 sym := v.Aux 43833 mem := v.Args[2] 43834 ptr := v.Args[0] 43835 v_1 := v.Args[1] 43836 if 
v_1.Op != OpAMD64InvertFlags { 43837 break 43838 } 43839 x := v_1.Args[0] 43840 v.reset(OpAMD64SETLEstore) 43841 v.AuxInt = off 43842 v.Aux = sym 43843 v.AddArg(ptr) 43844 v.AddArg(x) 43845 v.AddArg(mem) 43846 return true 43847 } 43848 // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem) 43849 // cond: is32Bit(off1+off2) 43850 // result: (SETGEstore [off1+off2] {sym} base val mem) 43851 for { 43852 off1 := v.AuxInt 43853 sym := v.Aux 43854 mem := v.Args[2] 43855 v_0 := v.Args[0] 43856 if v_0.Op != OpAMD64ADDQconst { 43857 break 43858 } 43859 off2 := v_0.AuxInt 43860 base := v_0.Args[0] 43861 val := v.Args[1] 43862 if !(is32Bit(off1 + off2)) { 43863 break 43864 } 43865 v.reset(OpAMD64SETGEstore) 43866 v.AuxInt = off1 + off2 43867 v.Aux = sym 43868 v.AddArg(base) 43869 v.AddArg(val) 43870 v.AddArg(mem) 43871 return true 43872 } 43873 // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 43874 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 43875 // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 43876 for { 43877 off1 := v.AuxInt 43878 sym1 := v.Aux 43879 mem := v.Args[2] 43880 v_0 := v.Args[0] 43881 if v_0.Op != OpAMD64LEAQ { 43882 break 43883 } 43884 off2 := v_0.AuxInt 43885 sym2 := v_0.Aux 43886 base := v_0.Args[0] 43887 val := v.Args[1] 43888 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 43889 break 43890 } 43891 v.reset(OpAMD64SETGEstore) 43892 v.AuxInt = off1 + off2 43893 v.Aux = mergeSym(sym1, sym2) 43894 v.AddArg(base) 43895 v.AddArg(val) 43896 v.AddArg(mem) 43897 return true 43898 } 43899 // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem) 43900 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 43901 for { 43902 off := v.AuxInt 43903 sym := v.Aux 43904 mem := v.Args[2] 43905 ptr := v.Args[0] 43906 v_1 := v.Args[1] 43907 if v_1.Op != OpAMD64FlagEQ { 43908 break 43909 } 43910 v.reset(OpAMD64MOVBstore) 43911 v.AuxInt = off 43912 v.Aux = sym 43913 v.AddArg(ptr) 43914 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 43915 v0.AuxInt = 1 43916 v.AddArg(v0) 43917 v.AddArg(mem) 43918 return true 43919 } 43920 // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) 43921 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 43922 for { 43923 off := v.AuxInt 43924 sym := v.Aux 43925 mem := v.Args[2] 43926 ptr := v.Args[0] 43927 v_1 := v.Args[1] 43928 if v_1.Op != OpAMD64FlagLT_ULT { 43929 break 43930 } 43931 v.reset(OpAMD64MOVBstore) 43932 v.AuxInt = off 43933 v.Aux = sym 43934 v.AddArg(ptr) 43935 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 43936 v0.AuxInt = 0 43937 v.AddArg(v0) 43938 v.AddArg(mem) 43939 return true 43940 } 43941 // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) 43942 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 43943 for { 43944 off := v.AuxInt 43945 sym := v.Aux 43946 mem := v.Args[2] 43947 ptr := v.Args[0] 43948 v_1 := v.Args[1] 43949 if v_1.Op != OpAMD64FlagLT_UGT { 43950 break 43951 } 43952 v.reset(OpAMD64MOVBstore) 43953 v.AuxInt = off 43954 v.Aux = sym 43955 v.AddArg(ptr) 43956 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 43957 v0.AuxInt = 0 43958 v.AddArg(v0) 43959 v.AddArg(mem) 43960 return true 43961 } 43962 // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) 43963 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 43964 for { 43965 off := v.AuxInt 43966 sym := v.Aux 43967 mem := v.Args[2] 43968 ptr := v.Args[0] 43969 v_1 := v.Args[1] 43970 if v_1.Op != OpAMD64FlagGT_ULT { 43971 break 
43972 } 43973 v.reset(OpAMD64MOVBstore) 43974 v.AuxInt = off 43975 v.Aux = sym 43976 v.AddArg(ptr) 43977 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 43978 v0.AuxInt = 1 43979 v.AddArg(v0) 43980 v.AddArg(mem) 43981 return true 43982 } 43983 // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) 43984 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 43985 for { 43986 off := v.AuxInt 43987 sym := v.Aux 43988 mem := v.Args[2] 43989 ptr := v.Args[0] 43990 v_1 := v.Args[1] 43991 if v_1.Op != OpAMD64FlagGT_UGT { 43992 break 43993 } 43994 v.reset(OpAMD64MOVBstore) 43995 v.AuxInt = off 43996 v.Aux = sym 43997 v.AddArg(ptr) 43998 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 43999 v0.AuxInt = 1 44000 v.AddArg(v0) 44001 v.AddArg(mem) 44002 return true 44003 } 44004 return false 44005 } 44006 func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool { 44007 b := v.Block 44008 typ := &b.Func.Config.Types 44009 // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem) 44010 // result: (SETLstore [off] {sym} ptr x mem) 44011 for { 44012 off := v.AuxInt 44013 sym := v.Aux 44014 mem := v.Args[2] 44015 ptr := v.Args[0] 44016 v_1 := v.Args[1] 44017 if v_1.Op != OpAMD64InvertFlags { 44018 break 44019 } 44020 x := v_1.Args[0] 44021 v.reset(OpAMD64SETLstore) 44022 v.AuxInt = off 44023 v.Aux = sym 44024 v.AddArg(ptr) 44025 v.AddArg(x) 44026 v.AddArg(mem) 44027 return true 44028 } 44029 // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem) 44030 // cond: is32Bit(off1+off2) 44031 // result: (SETGstore [off1+off2] {sym} base val mem) 44032 for { 44033 off1 := v.AuxInt 44034 sym := v.Aux 44035 mem := v.Args[2] 44036 v_0 := v.Args[0] 44037 if v_0.Op != OpAMD64ADDQconst { 44038 break 44039 } 44040 off2 := v_0.AuxInt 44041 base := v_0.Args[0] 44042 val := v.Args[1] 44043 if !(is32Bit(off1 + off2)) { 44044 break 44045 } 44046 v.reset(OpAMD64SETGstore) 44047 v.AuxInt = off1 + off2 44048 v.Aux = sym 44049 v.AddArg(base) 44050 v.AddArg(val) 44051 v.AddArg(mem) 44052 return true 44053 } 44054 // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 44055 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 44056 // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 44057 for { 44058 off1 := v.AuxInt 44059 sym1 := v.Aux 44060 mem := v.Args[2] 44061 v_0 := v.Args[0] 44062 if v_0.Op != OpAMD64LEAQ { 44063 break 44064 } 44065 off2 := v_0.AuxInt 44066 sym2 := v_0.Aux 44067 base := v_0.Args[0] 44068 val := v.Args[1] 44069 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 44070 break 44071 } 44072 v.reset(OpAMD64SETGstore) 44073 v.AuxInt = off1 + off2 44074 v.Aux = mergeSym(sym1, sym2) 44075 v.AddArg(base) 44076 v.AddArg(val) 44077 v.AddArg(mem) 44078 return true 44079 } 44080 // match: (SETGstore [off] {sym} ptr (FlagEQ) mem) 44081 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 44082 for { 44083 off := v.AuxInt 44084 sym := v.Aux 44085 mem := v.Args[2] 44086 ptr := v.Args[0] 44087 v_1 := v.Args[1] 44088 if v_1.Op != OpAMD64FlagEQ { 44089 break 44090 } 44091 v.reset(OpAMD64MOVBstore) 44092 v.AuxInt = off 44093 v.Aux = sym 44094 v.AddArg(ptr) 44095 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44096 v0.AuxInt = 0 44097 v.AddArg(v0) 44098 v.AddArg(mem) 44099 return true 44100 } 44101 // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem) 44102 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 44103 for { 44104 off := v.AuxInt 44105 sym := v.Aux 44106 mem := v.Args[2] 44107 ptr := v.Args[0] 
44108 v_1 := v.Args[1] 44109 if v_1.Op != OpAMD64FlagLT_ULT { 44110 break 44111 } 44112 v.reset(OpAMD64MOVBstore) 44113 v.AuxInt = off 44114 v.Aux = sym 44115 v.AddArg(ptr) 44116 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44117 v0.AuxInt = 0 44118 v.AddArg(v0) 44119 v.AddArg(mem) 44120 return true 44121 } 44122 // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem) 44123 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 44124 for { 44125 off := v.AuxInt 44126 sym := v.Aux 44127 mem := v.Args[2] 44128 ptr := v.Args[0] 44129 v_1 := v.Args[1] 44130 if v_1.Op != OpAMD64FlagLT_UGT { 44131 break 44132 } 44133 v.reset(OpAMD64MOVBstore) 44134 v.AuxInt = off 44135 v.Aux = sym 44136 v.AddArg(ptr) 44137 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44138 v0.AuxInt = 0 44139 v.AddArg(v0) 44140 v.AddArg(mem) 44141 return true 44142 } 44143 // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem) 44144 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 44145 for { 44146 off := v.AuxInt 44147 sym := v.Aux 44148 mem := v.Args[2] 44149 ptr := v.Args[0] 44150 v_1 := v.Args[1] 44151 if v_1.Op != OpAMD64FlagGT_ULT { 44152 break 44153 } 44154 v.reset(OpAMD64MOVBstore) 44155 v.AuxInt = off 44156 v.Aux = sym 44157 v.AddArg(ptr) 44158 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44159 v0.AuxInt = 1 44160 v.AddArg(v0) 44161 v.AddArg(mem) 44162 return true 44163 } 44164 // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem) 44165 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 44166 for { 44167 off := v.AuxInt 44168 sym := v.Aux 44169 mem := v.Args[2] 44170 ptr := v.Args[0] 44171 v_1 := v.Args[1] 44172 if v_1.Op != OpAMD64FlagGT_UGT { 44173 break 44174 } 44175 v.reset(OpAMD64MOVBstore) 44176 v.AuxInt = off 44177 v.Aux = sym 44178 v.AddArg(ptr) 44179 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44180 v0.AuxInt = 1 44181 v.AddArg(v0) 44182 v.AddArg(mem) 44183 return true 44184 } 44185 return false 44186 } 44187 func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { 44188 // match: (SETL (InvertFlags x)) 44189 // result: (SETG x) 44190 for { 44191 v_0 := v.Args[0] 44192 if v_0.Op != OpAMD64InvertFlags { 44193 break 44194 } 44195 x := v_0.Args[0] 44196 v.reset(OpAMD64SETG) 44197 v.AddArg(x) 44198 return true 44199 } 44200 // match: (SETL (FlagEQ)) 44201 // result: (MOVLconst [0]) 44202 for { 44203 v_0 := v.Args[0] 44204 if v_0.Op != OpAMD64FlagEQ { 44205 break 44206 } 44207 v.reset(OpAMD64MOVLconst) 44208 v.AuxInt = 0 44209 return true 44210 } 44211 // match: (SETL (FlagLT_ULT)) 44212 // result: (MOVLconst [1]) 44213 for { 44214 v_0 := v.Args[0] 44215 if v_0.Op != OpAMD64FlagLT_ULT { 44216 break 44217 } 44218 v.reset(OpAMD64MOVLconst) 44219 v.AuxInt = 1 44220 return true 44221 } 44222 // match: (SETL (FlagLT_UGT)) 44223 // result: (MOVLconst [1]) 44224 for { 44225 v_0 := v.Args[0] 44226 if v_0.Op != OpAMD64FlagLT_UGT { 44227 break 44228 } 44229 v.reset(OpAMD64MOVLconst) 44230 v.AuxInt = 1 44231 return true 44232 } 44233 // match: (SETL (FlagGT_ULT)) 44234 // result: (MOVLconst [0]) 44235 for { 44236 v_0 := v.Args[0] 44237 if v_0.Op != OpAMD64FlagGT_ULT { 44238 break 44239 } 44240 v.reset(OpAMD64MOVLconst) 44241 v.AuxInt = 0 44242 return true 44243 } 44244 // match: (SETL (FlagGT_UGT)) 44245 // result: (MOVLconst [0]) 44246 for { 44247 v_0 := v.Args[0] 44248 if v_0.Op != OpAMD64FlagGT_UGT { 44249 break 44250 } 44251 v.reset(OpAMD64MOVLconst) 44252 v.AuxInt = 0 44253 return true 44254 } 44255 return false 44256 } 44257 func 
rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { 44258 // match: (SETLE (InvertFlags x)) 44259 // result: (SETGE x) 44260 for { 44261 v_0 := v.Args[0] 44262 if v_0.Op != OpAMD64InvertFlags { 44263 break 44264 } 44265 x := v_0.Args[0] 44266 v.reset(OpAMD64SETGE) 44267 v.AddArg(x) 44268 return true 44269 } 44270 // match: (SETLE (FlagEQ)) 44271 // result: (MOVLconst [1]) 44272 for { 44273 v_0 := v.Args[0] 44274 if v_0.Op != OpAMD64FlagEQ { 44275 break 44276 } 44277 v.reset(OpAMD64MOVLconst) 44278 v.AuxInt = 1 44279 return true 44280 } 44281 // match: (SETLE (FlagLT_ULT)) 44282 // result: (MOVLconst [1]) 44283 for { 44284 v_0 := v.Args[0] 44285 if v_0.Op != OpAMD64FlagLT_ULT { 44286 break 44287 } 44288 v.reset(OpAMD64MOVLconst) 44289 v.AuxInt = 1 44290 return true 44291 } 44292 // match: (SETLE (FlagLT_UGT)) 44293 // result: (MOVLconst [1]) 44294 for { 44295 v_0 := v.Args[0] 44296 if v_0.Op != OpAMD64FlagLT_UGT { 44297 break 44298 } 44299 v.reset(OpAMD64MOVLconst) 44300 v.AuxInt = 1 44301 return true 44302 } 44303 // match: (SETLE (FlagGT_ULT)) 44304 // result: (MOVLconst [0]) 44305 for { 44306 v_0 := v.Args[0] 44307 if v_0.Op != OpAMD64FlagGT_ULT { 44308 break 44309 } 44310 v.reset(OpAMD64MOVLconst) 44311 v.AuxInt = 0 44312 return true 44313 } 44314 // match: (SETLE (FlagGT_UGT)) 44315 // result: (MOVLconst [0]) 44316 for { 44317 v_0 := v.Args[0] 44318 if v_0.Op != OpAMD64FlagGT_UGT { 44319 break 44320 } 44321 v.reset(OpAMD64MOVLconst) 44322 v.AuxInt = 0 44323 return true 44324 } 44325 return false 44326 } 44327 func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool { 44328 b := v.Block 44329 typ := &b.Func.Config.Types 44330 // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem) 44331 // result: (SETGEstore [off] {sym} ptr x mem) 44332 for { 44333 off := v.AuxInt 44334 sym := v.Aux 44335 mem := v.Args[2] 44336 ptr := v.Args[0] 44337 v_1 := v.Args[1] 44338 if v_1.Op != OpAMD64InvertFlags { 44339 break 44340 } 44341 x := v_1.Args[0] 44342 v.reset(OpAMD64SETGEstore) 44343 v.AuxInt = off 44344 v.Aux = sym 44345 v.AddArg(ptr) 44346 v.AddArg(x) 44347 v.AddArg(mem) 44348 return true 44349 } 44350 // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem) 44351 // cond: is32Bit(off1+off2) 44352 // result: (SETLEstore [off1+off2] {sym} base val mem) 44353 for { 44354 off1 := v.AuxInt 44355 sym := v.Aux 44356 mem := v.Args[2] 44357 v_0 := v.Args[0] 44358 if v_0.Op != OpAMD64ADDQconst { 44359 break 44360 } 44361 off2 := v_0.AuxInt 44362 base := v_0.Args[0] 44363 val := v.Args[1] 44364 if !(is32Bit(off1 + off2)) { 44365 break 44366 } 44367 v.reset(OpAMD64SETLEstore) 44368 v.AuxInt = off1 + off2 44369 v.Aux = sym 44370 v.AddArg(base) 44371 v.AddArg(val) 44372 v.AddArg(mem) 44373 return true 44374 } 44375 // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 44376 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 44377 // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 44378 for { 44379 off1 := v.AuxInt 44380 sym1 := v.Aux 44381 mem := v.Args[2] 44382 v_0 := v.Args[0] 44383 if v_0.Op != OpAMD64LEAQ { 44384 break 44385 } 44386 off2 := v_0.AuxInt 44387 sym2 := v_0.Aux 44388 base := v_0.Args[0] 44389 val := v.Args[1] 44390 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 44391 break 44392 } 44393 v.reset(OpAMD64SETLEstore) 44394 v.AuxInt = off1 + off2 44395 v.Aux = mergeSym(sym1, sym2) 44396 v.AddArg(base) 44397 v.AddArg(val) 44398 v.AddArg(mem) 44399 return true 44400 } 44401 // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem) 44402 // result: 
(MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 44403 for { 44404 off := v.AuxInt 44405 sym := v.Aux 44406 mem := v.Args[2] 44407 ptr := v.Args[0] 44408 v_1 := v.Args[1] 44409 if v_1.Op != OpAMD64FlagEQ { 44410 break 44411 } 44412 v.reset(OpAMD64MOVBstore) 44413 v.AuxInt = off 44414 v.Aux = sym 44415 v.AddArg(ptr) 44416 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44417 v0.AuxInt = 1 44418 v.AddArg(v0) 44419 v.AddArg(mem) 44420 return true 44421 } 44422 // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) 44423 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 44424 for { 44425 off := v.AuxInt 44426 sym := v.Aux 44427 mem := v.Args[2] 44428 ptr := v.Args[0] 44429 v_1 := v.Args[1] 44430 if v_1.Op != OpAMD64FlagLT_ULT { 44431 break 44432 } 44433 v.reset(OpAMD64MOVBstore) 44434 v.AuxInt = off 44435 v.Aux = sym 44436 v.AddArg(ptr) 44437 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44438 v0.AuxInt = 1 44439 v.AddArg(v0) 44440 v.AddArg(mem) 44441 return true 44442 } 44443 // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) 44444 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 44445 for { 44446 off := v.AuxInt 44447 sym := v.Aux 44448 mem := v.Args[2] 44449 ptr := v.Args[0] 44450 v_1 := v.Args[1] 44451 if v_1.Op != OpAMD64FlagLT_UGT { 44452 break 44453 } 44454 v.reset(OpAMD64MOVBstore) 44455 v.AuxInt = off 44456 v.Aux = sym 44457 v.AddArg(ptr) 44458 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44459 v0.AuxInt = 1 44460 v.AddArg(v0) 44461 v.AddArg(mem) 44462 return true 44463 } 44464 // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) 44465 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 44466 for { 44467 off := v.AuxInt 44468 sym := v.Aux 44469 mem := v.Args[2] 44470 ptr := v.Args[0] 44471 v_1 := v.Args[1] 44472 if v_1.Op != OpAMD64FlagGT_ULT { 44473 break 44474 } 44475 v.reset(OpAMD64MOVBstore) 44476 v.AuxInt = off 44477 v.Aux = sym 44478 v.AddArg(ptr) 44479 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44480 v0.AuxInt = 0 44481 v.AddArg(v0) 44482 v.AddArg(mem) 44483 return true 44484 } 44485 // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) 44486 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 44487 for { 44488 off := v.AuxInt 44489 sym := v.Aux 44490 mem := v.Args[2] 44491 ptr := v.Args[0] 44492 v_1 := v.Args[1] 44493 if v_1.Op != OpAMD64FlagGT_UGT { 44494 break 44495 } 44496 v.reset(OpAMD64MOVBstore) 44497 v.AuxInt = off 44498 v.Aux = sym 44499 v.AddArg(ptr) 44500 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44501 v0.AuxInt = 0 44502 v.AddArg(v0) 44503 v.AddArg(mem) 44504 return true 44505 } 44506 return false 44507 } 44508 func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool { 44509 b := v.Block 44510 typ := &b.Func.Config.Types 44511 // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem) 44512 // result: (SETGstore [off] {sym} ptr x mem) 44513 for { 44514 off := v.AuxInt 44515 sym := v.Aux 44516 mem := v.Args[2] 44517 ptr := v.Args[0] 44518 v_1 := v.Args[1] 44519 if v_1.Op != OpAMD64InvertFlags { 44520 break 44521 } 44522 x := v_1.Args[0] 44523 v.reset(OpAMD64SETGstore) 44524 v.AuxInt = off 44525 v.Aux = sym 44526 v.AddArg(ptr) 44527 v.AddArg(x) 44528 v.AddArg(mem) 44529 return true 44530 } 44531 // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem) 44532 // cond: is32Bit(off1+off2) 44533 // result: (SETLstore [off1+off2] {sym} base val mem) 44534 for { 44535 off1 := v.AuxInt 44536 sym := v.Aux 44537 mem := 
v.Args[2] 44538 v_0 := v.Args[0] 44539 if v_0.Op != OpAMD64ADDQconst { 44540 break 44541 } 44542 off2 := v_0.AuxInt 44543 base := v_0.Args[0] 44544 val := v.Args[1] 44545 if !(is32Bit(off1 + off2)) { 44546 break 44547 } 44548 v.reset(OpAMD64SETLstore) 44549 v.AuxInt = off1 + off2 44550 v.Aux = sym 44551 v.AddArg(base) 44552 v.AddArg(val) 44553 v.AddArg(mem) 44554 return true 44555 } 44556 // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 44557 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 44558 // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 44559 for { 44560 off1 := v.AuxInt 44561 sym1 := v.Aux 44562 mem := v.Args[2] 44563 v_0 := v.Args[0] 44564 if v_0.Op != OpAMD64LEAQ { 44565 break 44566 } 44567 off2 := v_0.AuxInt 44568 sym2 := v_0.Aux 44569 base := v_0.Args[0] 44570 val := v.Args[1] 44571 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 44572 break 44573 } 44574 v.reset(OpAMD64SETLstore) 44575 v.AuxInt = off1 + off2 44576 v.Aux = mergeSym(sym1, sym2) 44577 v.AddArg(base) 44578 v.AddArg(val) 44579 v.AddArg(mem) 44580 return true 44581 } 44582 // match: (SETLstore [off] {sym} ptr (FlagEQ) mem) 44583 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 44584 for { 44585 off := v.AuxInt 44586 sym := v.Aux 44587 mem := v.Args[2] 44588 ptr := v.Args[0] 44589 v_1 := v.Args[1] 44590 if v_1.Op != OpAMD64FlagEQ { 44591 break 44592 } 44593 v.reset(OpAMD64MOVBstore) 44594 v.AuxInt = off 44595 v.Aux = sym 44596 v.AddArg(ptr) 44597 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44598 v0.AuxInt = 0 44599 v.AddArg(v0) 44600 v.AddArg(mem) 44601 return true 44602 } 44603 // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem) 44604 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 44605 for { 44606 off := v.AuxInt 44607 sym := v.Aux 44608 mem := v.Args[2] 44609 ptr := v.Args[0] 44610 v_1 := v.Args[1] 44611 if v_1.Op != OpAMD64FlagLT_ULT { 44612 break 44613 } 44614 v.reset(OpAMD64MOVBstore) 44615 v.AuxInt = off 44616 v.Aux = sym 44617 v.AddArg(ptr) 44618 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44619 v0.AuxInt = 1 44620 v.AddArg(v0) 44621 v.AddArg(mem) 44622 return true 44623 } 44624 // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem) 44625 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 44626 for { 44627 off := v.AuxInt 44628 sym := v.Aux 44629 mem := v.Args[2] 44630 ptr := v.Args[0] 44631 v_1 := v.Args[1] 44632 if v_1.Op != OpAMD64FlagLT_UGT { 44633 break 44634 } 44635 v.reset(OpAMD64MOVBstore) 44636 v.AuxInt = off 44637 v.Aux = sym 44638 v.AddArg(ptr) 44639 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44640 v0.AuxInt = 1 44641 v.AddArg(v0) 44642 v.AddArg(mem) 44643 return true 44644 } 44645 // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem) 44646 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 44647 for { 44648 off := v.AuxInt 44649 sym := v.Aux 44650 mem := v.Args[2] 44651 ptr := v.Args[0] 44652 v_1 := v.Args[1] 44653 if v_1.Op != OpAMD64FlagGT_ULT { 44654 break 44655 } 44656 v.reset(OpAMD64MOVBstore) 44657 v.AuxInt = off 44658 v.Aux = sym 44659 v.AddArg(ptr) 44660 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44661 v0.AuxInt = 0 44662 v.AddArg(v0) 44663 v.AddArg(mem) 44664 return true 44665 } 44666 // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem) 44667 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 44668 for { 44669 off := v.AuxInt 44670 sym := v.Aux 44671 mem := v.Args[2] 44672 ptr := 
v.Args[0] 44673 v_1 := v.Args[1] 44674 if v_1.Op != OpAMD64FlagGT_UGT { 44675 break 44676 } 44677 v.reset(OpAMD64MOVBstore) 44678 v.AuxInt = off 44679 v.Aux = sym 44680 v.AddArg(ptr) 44681 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 44682 v0.AuxInt = 0 44683 v.AddArg(v0) 44684 v.AddArg(mem) 44685 return true 44686 } 44687 return false 44688 } 44689 func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 44690 b := v.Block 44691 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 44692 // result: (SETB (BTL x y)) 44693 for { 44694 v_0 := v.Args[0] 44695 if v_0.Op != OpAMD64TESTL { 44696 break 44697 } 44698 y := v_0.Args[1] 44699 v_0_0 := v_0.Args[0] 44700 if v_0_0.Op != OpAMD64SHLL { 44701 break 44702 } 44703 x := v_0_0.Args[1] 44704 v_0_0_0 := v_0_0.Args[0] 44705 if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 { 44706 break 44707 } 44708 v.reset(OpAMD64SETB) 44709 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 44710 v0.AddArg(x) 44711 v0.AddArg(y) 44712 v.AddArg(v0) 44713 return true 44714 } 44715 // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) 44716 // result: (SETB (BTL x y)) 44717 for { 44718 v_0 := v.Args[0] 44719 if v_0.Op != OpAMD64TESTL { 44720 break 44721 } 44722 _ = v_0.Args[1] 44723 y := v_0.Args[0] 44724 v_0_1 := v_0.Args[1] 44725 if v_0_1.Op != OpAMD64SHLL { 44726 break 44727 } 44728 x := v_0_1.Args[1] 44729 v_0_1_0 := v_0_1.Args[0] 44730 if v_0_1_0.Op != OpAMD64MOVLconst || v_0_1_0.AuxInt != 1 { 44731 break 44732 } 44733 v.reset(OpAMD64SETB) 44734 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 44735 v0.AddArg(x) 44736 v0.AddArg(y) 44737 v.AddArg(v0) 44738 return true 44739 } 44740 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 44741 // result: (SETB (BTQ x y)) 44742 for { 44743 v_0 := v.Args[0] 44744 if v_0.Op != OpAMD64TESTQ { 44745 break 44746 } 44747 y := v_0.Args[1] 44748 v_0_0 := v_0.Args[0] 44749 if v_0_0.Op != OpAMD64SHLQ { 44750 break 44751 } 44752 x := v_0_0.Args[1] 44753 v_0_0_0 := v_0_0.Args[0] 44754 if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 { 44755 break 44756 } 44757 v.reset(OpAMD64SETB) 44758 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 44759 v0.AddArg(x) 44760 v0.AddArg(y) 44761 v.AddArg(v0) 44762 return true 44763 } 44764 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) 44765 // result: (SETB (BTQ x y)) 44766 for { 44767 v_0 := v.Args[0] 44768 if v_0.Op != OpAMD64TESTQ { 44769 break 44770 } 44771 _ = v_0.Args[1] 44772 y := v_0.Args[0] 44773 v_0_1 := v_0.Args[1] 44774 if v_0_1.Op != OpAMD64SHLQ { 44775 break 44776 } 44777 x := v_0_1.Args[1] 44778 v_0_1_0 := v_0_1.Args[0] 44779 if v_0_1_0.Op != OpAMD64MOVQconst || v_0_1_0.AuxInt != 1 { 44780 break 44781 } 44782 v.reset(OpAMD64SETB) 44783 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 44784 v0.AddArg(x) 44785 v0.AddArg(y) 44786 v.AddArg(v0) 44787 return true 44788 } 44789 // match: (SETNE (TESTLconst [c] x)) 44790 // cond: isUint32PowerOfTwo(c) 44791 // result: (SETB (BTLconst [log2uint32(c)] x)) 44792 for { 44793 v_0 := v.Args[0] 44794 if v_0.Op != OpAMD64TESTLconst { 44795 break 44796 } 44797 c := v_0.AuxInt 44798 x := v_0.Args[0] 44799 if !(isUint32PowerOfTwo(c)) { 44800 break 44801 } 44802 v.reset(OpAMD64SETB) 44803 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 44804 v0.AuxInt = log2uint32(c) 44805 v0.AddArg(x) 44806 v.AddArg(v0) 44807 return true 44808 } 44809 // match: (SETNE (TESTQconst [c] x)) 44810 // cond: isUint64PowerOfTwo(c) 44811 // result: (SETB (BTQconst [log2(c)] x)) 44812 for { 44813 v_0 := v.Args[0] 44814 
if v_0.Op != OpAMD64TESTQconst { 44815 break 44816 } 44817 c := v_0.AuxInt 44818 x := v_0.Args[0] 44819 if !(isUint64PowerOfTwo(c)) { 44820 break 44821 } 44822 v.reset(OpAMD64SETB) 44823 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44824 v0.AuxInt = log2(c) 44825 v0.AddArg(x) 44826 v.AddArg(v0) 44827 return true 44828 } 44829 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 44830 // cond: isUint64PowerOfTwo(c) 44831 // result: (SETB (BTQconst [log2(c)] x)) 44832 for { 44833 v_0 := v.Args[0] 44834 if v_0.Op != OpAMD64TESTQ { 44835 break 44836 } 44837 x := v_0.Args[1] 44838 v_0_0 := v_0.Args[0] 44839 if v_0_0.Op != OpAMD64MOVQconst { 44840 break 44841 } 44842 c := v_0_0.AuxInt 44843 if !(isUint64PowerOfTwo(c)) { 44844 break 44845 } 44846 v.reset(OpAMD64SETB) 44847 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44848 v0.AuxInt = log2(c) 44849 v0.AddArg(x) 44850 v.AddArg(v0) 44851 return true 44852 } 44853 // match: (SETNE (TESTQ x (MOVQconst [c]))) 44854 // cond: isUint64PowerOfTwo(c) 44855 // result: (SETB (BTQconst [log2(c)] x)) 44856 for { 44857 v_0 := v.Args[0] 44858 if v_0.Op != OpAMD64TESTQ { 44859 break 44860 } 44861 _ = v_0.Args[1] 44862 x := v_0.Args[0] 44863 v_0_1 := v_0.Args[1] 44864 if v_0_1.Op != OpAMD64MOVQconst { 44865 break 44866 } 44867 c := v_0_1.AuxInt 44868 if !(isUint64PowerOfTwo(c)) { 44869 break 44870 } 44871 v.reset(OpAMD64SETB) 44872 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44873 v0.AuxInt = log2(c) 44874 v0.AddArg(x) 44875 v.AddArg(v0) 44876 return true 44877 } 44878 // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _))) 44879 // result: (SETEQ (CMPLconst [0] s)) 44880 for { 44881 v_0 := v.Args[0] 44882 if v_0.Op != OpAMD64CMPLconst || v_0.AuxInt != 1 { 44883 break 44884 } 44885 s := v_0.Args[0] 44886 if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 { 44887 break 44888 } 44889 v.reset(OpAMD64SETEQ) 44890 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44891 v0.AuxInt = 0 44892 v0.AddArg(s) 44893 v.AddArg(v0) 44894 return true 44895 } 44896 // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _))) 44897 // result: (SETEQ (CMPQconst [0] s)) 44898 for { 44899 v_0 := v.Args[0] 44900 if v_0.Op != OpAMD64CMPQconst || v_0.AuxInt != 1 { 44901 break 44902 } 44903 s := v_0.Args[0] 44904 if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 { 44905 break 44906 } 44907 v.reset(OpAMD64SETEQ) 44908 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44909 v0.AuxInt = 0 44910 v0.AddArg(s) 44911 v.AddArg(v0) 44912 return true 44913 } 44914 return false 44915 } 44916 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { 44917 b := v.Block 44918 // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) 44919 // cond: z1==z2 44920 // result: (SETB (BTQconst [63] x)) 44921 for { 44922 v_0 := v.Args[0] 44923 if v_0.Op != OpAMD64TESTQ { 44924 break 44925 } 44926 z2 := v_0.Args[1] 44927 z1 := v_0.Args[0] 44928 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { 44929 break 44930 } 44931 z1_0 := z1.Args[0] 44932 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { 44933 break 44934 } 44935 x := z1_0.Args[0] 44936 if !(z1 == z2) { 44937 break 44938 } 44939 v.reset(OpAMD64SETB) 44940 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44941 v0.AuxInt = 63 44942 v0.AddArg(x) 44943 v.AddArg(v0) 44944 return true 44945 } 44946 // match: (SETNE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) 44947 // cond: z1==z2 44948 // result: (SETB (BTQconst [63] x)) 44949 for { 44950 v_0 := v.Args[0] 44951 if v_0.Op != OpAMD64TESTQ { 44952 break 
44953 } 44954 _ = v_0.Args[1] 44955 z2 := v_0.Args[0] 44956 z1 := v_0.Args[1] 44957 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { 44958 break 44959 } 44960 z1_0 := z1.Args[0] 44961 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { 44962 break 44963 } 44964 x := z1_0.Args[0] 44965 if !(z1 == z2) { 44966 break 44967 } 44968 v.reset(OpAMD64SETB) 44969 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44970 v0.AuxInt = 63 44971 v0.AddArg(x) 44972 v.AddArg(v0) 44973 return true 44974 } 44975 // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) 44976 // cond: z1==z2 44977 // result: (SETB (BTQconst [31] x)) 44978 for { 44979 v_0 := v.Args[0] 44980 if v_0.Op != OpAMD64TESTL { 44981 break 44982 } 44983 z2 := v_0.Args[1] 44984 z1 := v_0.Args[0] 44985 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { 44986 break 44987 } 44988 z1_0 := z1.Args[0] 44989 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { 44990 break 44991 } 44992 x := z1_0.Args[0] 44993 if !(z1 == z2) { 44994 break 44995 } 44996 v.reset(OpAMD64SETB) 44997 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44998 v0.AuxInt = 31 44999 v0.AddArg(x) 45000 v.AddArg(v0) 45001 return true 45002 } 45003 // match: (SETNE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) 45004 // cond: z1==z2 45005 // result: (SETB (BTQconst [31] x)) 45006 for { 45007 v_0 := v.Args[0] 45008 if v_0.Op != OpAMD64TESTL { 45009 break 45010 } 45011 _ = v_0.Args[1] 45012 z2 := v_0.Args[0] 45013 z1 := v_0.Args[1] 45014 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { 45015 break 45016 } 45017 z1_0 := z1.Args[0] 45018 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { 45019 break 45020 } 45021 x := z1_0.Args[0] 45022 if !(z1 == z2) { 45023 break 45024 } 45025 v.reset(OpAMD64SETB) 45026 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45027 v0.AuxInt = 31 45028 v0.AddArg(x) 45029 v.AddArg(v0) 45030 return true 45031 } 45032 // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) 45033 // cond: z1==z2 45034 // result: (SETB (BTQconst [0] x)) 45035 for { 45036 v_0 := v.Args[0] 45037 if v_0.Op != OpAMD64TESTQ { 45038 break 45039 } 45040 z2 := v_0.Args[1] 45041 z1 := v_0.Args[0] 45042 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 45043 break 45044 } 45045 z1_0 := z1.Args[0] 45046 if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { 45047 break 45048 } 45049 x := z1_0.Args[0] 45050 if !(z1 == z2) { 45051 break 45052 } 45053 v.reset(OpAMD64SETB) 45054 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45055 v0.AuxInt = 0 45056 v0.AddArg(x) 45057 v.AddArg(v0) 45058 return true 45059 } 45060 // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) 45061 // cond: z1==z2 45062 // result: (SETB (BTQconst [0] x)) 45063 for { 45064 v_0 := v.Args[0] 45065 if v_0.Op != OpAMD64TESTQ { 45066 break 45067 } 45068 _ = v_0.Args[1] 45069 z2 := v_0.Args[0] 45070 z1 := v_0.Args[1] 45071 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 45072 break 45073 } 45074 z1_0 := z1.Args[0] 45075 if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { 45076 break 45077 } 45078 x := z1_0.Args[0] 45079 if !(z1 == z2) { 45080 break 45081 } 45082 v.reset(OpAMD64SETB) 45083 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45084 v0.AuxInt = 0 45085 v0.AddArg(x) 45086 v.AddArg(v0) 45087 return true 45088 } 45089 // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) 45090 // cond: z1==z2 45091 // result: (SETB (BTLconst [0] x)) 45092 for { 45093 v_0 := v.Args[0] 45094 if v_0.Op != OpAMD64TESTL { 45095 
break 45096 } 45097 z2 := v_0.Args[1] 45098 z1 := v_0.Args[0] 45099 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 45100 break 45101 } 45102 z1_0 := z1.Args[0] 45103 if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { 45104 break 45105 } 45106 x := z1_0.Args[0] 45107 if !(z1 == z2) { 45108 break 45109 } 45110 v.reset(OpAMD64SETB) 45111 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 45112 v0.AuxInt = 0 45113 v0.AddArg(x) 45114 v.AddArg(v0) 45115 return true 45116 } 45117 // match: (SETNE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) 45118 // cond: z1==z2 45119 // result: (SETB (BTLconst [0] x)) 45120 for { 45121 v_0 := v.Args[0] 45122 if v_0.Op != OpAMD64TESTL { 45123 break 45124 } 45125 _ = v_0.Args[1] 45126 z2 := v_0.Args[0] 45127 z1 := v_0.Args[1] 45128 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 45129 break 45130 } 45131 z1_0 := z1.Args[0] 45132 if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { 45133 break 45134 } 45135 x := z1_0.Args[0] 45136 if !(z1 == z2) { 45137 break 45138 } 45139 v.reset(OpAMD64SETB) 45140 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 45141 v0.AuxInt = 0 45142 v0.AddArg(x) 45143 v.AddArg(v0) 45144 return true 45145 } 45146 // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2)) 45147 // cond: z1==z2 45148 // result: (SETB (BTQconst [63] x)) 45149 for { 45150 v_0 := v.Args[0] 45151 if v_0.Op != OpAMD64TESTQ { 45152 break 45153 } 45154 z2 := v_0.Args[1] 45155 z1 := v_0.Args[0] 45156 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 45157 break 45158 } 45159 x := z1.Args[0] 45160 if !(z1 == z2) { 45161 break 45162 } 45163 v.reset(OpAMD64SETB) 45164 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45165 v0.AuxInt = 63 45166 v0.AddArg(x) 45167 v.AddArg(v0) 45168 return true 45169 } 45170 // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] x))) 45171 // cond: z1==z2 45172 // result: (SETB (BTQconst [63] x)) 45173 for { 45174 v_0 := v.Args[0] 45175 if v_0.Op != OpAMD64TESTQ { 45176 break 45177 } 45178 _ = v_0.Args[1] 45179 z2 := v_0.Args[0] 45180 z1 := v_0.Args[1] 45181 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 45182 break 45183 } 45184 x := z1.Args[0] 45185 if !(z1 == z2) { 45186 break 45187 } 45188 v.reset(OpAMD64SETB) 45189 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45190 v0.AuxInt = 63 45191 v0.AddArg(x) 45192 v.AddArg(v0) 45193 return true 45194 } 45195 return false 45196 } 45197 func rewriteValueAMD64_OpAMD64SETNE_20(v *Value) bool { 45198 b := v.Block 45199 // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2)) 45200 // cond: z1==z2 45201 // result: (SETB (BTLconst [31] x)) 45202 for { 45203 v_0 := v.Args[0] 45204 if v_0.Op != OpAMD64TESTL { 45205 break 45206 } 45207 z2 := v_0.Args[1] 45208 z1 := v_0.Args[0] 45209 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 45210 break 45211 } 45212 x := z1.Args[0] 45213 if !(z1 == z2) { 45214 break 45215 } 45216 v.reset(OpAMD64SETB) 45217 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 45218 v0.AuxInt = 31 45219 v0.AddArg(x) 45220 v.AddArg(v0) 45221 return true 45222 } 45223 // match: (SETNE (TESTL z2 z1:(SHRLconst [31] x))) 45224 // cond: z1==z2 45225 // result: (SETB (BTLconst [31] x)) 45226 for { 45227 v_0 := v.Args[0] 45228 if v_0.Op != OpAMD64TESTL { 45229 break 45230 } 45231 _ = v_0.Args[1] 45232 z2 := v_0.Args[0] 45233 z1 := v_0.Args[1] 45234 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 45235 break 45236 } 45237 x := z1.Args[0] 45238 if !(z1 == z2) { 45239 break 45240 } 45241 v.reset(OpAMD64SETB) 45242 v0 := b.NewValue0(v.Pos, 
OpAMD64BTLconst, types.TypeFlags) 45243 v0.AuxInt = 31 45244 v0.AddArg(x) 45245 v.AddArg(v0) 45246 return true 45247 } 45248 // match: (SETNE (InvertFlags x)) 45249 // result: (SETNE x) 45250 for { 45251 v_0 := v.Args[0] 45252 if v_0.Op != OpAMD64InvertFlags { 45253 break 45254 } 45255 x := v_0.Args[0] 45256 v.reset(OpAMD64SETNE) 45257 v.AddArg(x) 45258 return true 45259 } 45260 // match: (SETNE (FlagEQ)) 45261 // result: (MOVLconst [0]) 45262 for { 45263 v_0 := v.Args[0] 45264 if v_0.Op != OpAMD64FlagEQ { 45265 break 45266 } 45267 v.reset(OpAMD64MOVLconst) 45268 v.AuxInt = 0 45269 return true 45270 } 45271 // match: (SETNE (FlagLT_ULT)) 45272 // result: (MOVLconst [1]) 45273 for { 45274 v_0 := v.Args[0] 45275 if v_0.Op != OpAMD64FlagLT_ULT { 45276 break 45277 } 45278 v.reset(OpAMD64MOVLconst) 45279 v.AuxInt = 1 45280 return true 45281 } 45282 // match: (SETNE (FlagLT_UGT)) 45283 // result: (MOVLconst [1]) 45284 for { 45285 v_0 := v.Args[0] 45286 if v_0.Op != OpAMD64FlagLT_UGT { 45287 break 45288 } 45289 v.reset(OpAMD64MOVLconst) 45290 v.AuxInt = 1 45291 return true 45292 } 45293 // match: (SETNE (FlagGT_ULT)) 45294 // result: (MOVLconst [1]) 45295 for { 45296 v_0 := v.Args[0] 45297 if v_0.Op != OpAMD64FlagGT_ULT { 45298 break 45299 } 45300 v.reset(OpAMD64MOVLconst) 45301 v.AuxInt = 1 45302 return true 45303 } 45304 // match: (SETNE (FlagGT_UGT)) 45305 // result: (MOVLconst [1]) 45306 for { 45307 v_0 := v.Args[0] 45308 if v_0.Op != OpAMD64FlagGT_UGT { 45309 break 45310 } 45311 v.reset(OpAMD64MOVLconst) 45312 v.AuxInt = 1 45313 return true 45314 } 45315 return false 45316 } 45317 func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool { 45318 b := v.Block 45319 // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) 45320 // result: (SETBstore [off] {sym} ptr (BTL x y) mem) 45321 for { 45322 off := v.AuxInt 45323 sym := v.Aux 45324 mem := v.Args[2] 45325 ptr := v.Args[0] 45326 v_1 := v.Args[1] 45327 if v_1.Op != OpAMD64TESTL { 45328 break 45329 } 45330 y := v_1.Args[1] 45331 v_1_0 := v_1.Args[0] 45332 if v_1_0.Op != OpAMD64SHLL { 45333 break 45334 } 45335 x := v_1_0.Args[1] 45336 v_1_0_0 := v_1_0.Args[0] 45337 if v_1_0_0.Op != OpAMD64MOVLconst || v_1_0_0.AuxInt != 1 { 45338 break 45339 } 45340 v.reset(OpAMD64SETBstore) 45341 v.AuxInt = off 45342 v.Aux = sym 45343 v.AddArg(ptr) 45344 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 45345 v0.AddArg(x) 45346 v0.AddArg(y) 45347 v.AddArg(v0) 45348 v.AddArg(mem) 45349 return true 45350 } 45351 // match: (SETNEstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) 45352 // result: (SETBstore [off] {sym} ptr (BTL x y) mem) 45353 for { 45354 off := v.AuxInt 45355 sym := v.Aux 45356 mem := v.Args[2] 45357 ptr := v.Args[0] 45358 v_1 := v.Args[1] 45359 if v_1.Op != OpAMD64TESTL { 45360 break 45361 } 45362 _ = v_1.Args[1] 45363 y := v_1.Args[0] 45364 v_1_1 := v_1.Args[1] 45365 if v_1_1.Op != OpAMD64SHLL { 45366 break 45367 } 45368 x := v_1_1.Args[1] 45369 v_1_1_0 := v_1_1.Args[0] 45370 if v_1_1_0.Op != OpAMD64MOVLconst || v_1_1_0.AuxInt != 1 { 45371 break 45372 } 45373 v.reset(OpAMD64SETBstore) 45374 v.AuxInt = off 45375 v.Aux = sym 45376 v.AddArg(ptr) 45377 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 45378 v0.AddArg(x) 45379 v0.AddArg(y) 45380 v.AddArg(v0) 45381 v.AddArg(mem) 45382 return true 45383 } 45384 // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) 45385 // result: (SETBstore [off] {sym} ptr (BTQ x y) mem) 45386 for { 45387 off := v.AuxInt 45388 sym := v.Aux 45389 mem := 
v.Args[2] 45390 ptr := v.Args[0] 45391 v_1 := v.Args[1] 45392 if v_1.Op != OpAMD64TESTQ { 45393 break 45394 } 45395 y := v_1.Args[1] 45396 v_1_0 := v_1.Args[0] 45397 if v_1_0.Op != OpAMD64SHLQ { 45398 break 45399 } 45400 x := v_1_0.Args[1] 45401 v_1_0_0 := v_1_0.Args[0] 45402 if v_1_0_0.Op != OpAMD64MOVQconst || v_1_0_0.AuxInt != 1 { 45403 break 45404 } 45405 v.reset(OpAMD64SETBstore) 45406 v.AuxInt = off 45407 v.Aux = sym 45408 v.AddArg(ptr) 45409 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 45410 v0.AddArg(x) 45411 v0.AddArg(y) 45412 v.AddArg(v0) 45413 v.AddArg(mem) 45414 return true 45415 } 45416 // match: (SETNEstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) 45417 // result: (SETBstore [off] {sym} ptr (BTQ x y) mem) 45418 for { 45419 off := v.AuxInt 45420 sym := v.Aux 45421 mem := v.Args[2] 45422 ptr := v.Args[0] 45423 v_1 := v.Args[1] 45424 if v_1.Op != OpAMD64TESTQ { 45425 break 45426 } 45427 _ = v_1.Args[1] 45428 y := v_1.Args[0] 45429 v_1_1 := v_1.Args[1] 45430 if v_1_1.Op != OpAMD64SHLQ { 45431 break 45432 } 45433 x := v_1_1.Args[1] 45434 v_1_1_0 := v_1_1.Args[0] 45435 if v_1_1_0.Op != OpAMD64MOVQconst || v_1_1_0.AuxInt != 1 { 45436 break 45437 } 45438 v.reset(OpAMD64SETBstore) 45439 v.AuxInt = off 45440 v.Aux = sym 45441 v.AddArg(ptr) 45442 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 45443 v0.AddArg(x) 45444 v0.AddArg(y) 45445 v.AddArg(v0) 45446 v.AddArg(mem) 45447 return true 45448 } 45449 // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem) 45450 // cond: isUint32PowerOfTwo(c) 45451 // result: (SETBstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem) 45452 for { 45453 off := v.AuxInt 45454 sym := v.Aux 45455 mem := v.Args[2] 45456 ptr := v.Args[0] 45457 v_1 := v.Args[1] 45458 if v_1.Op != OpAMD64TESTLconst { 45459 break 45460 } 45461 c := v_1.AuxInt 45462 x := v_1.Args[0] 45463 if !(isUint32PowerOfTwo(c)) { 45464 break 45465 } 45466 v.reset(OpAMD64SETBstore) 45467 v.AuxInt = off 45468 v.Aux = sym 45469 v.AddArg(ptr) 45470 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 45471 v0.AuxInt = log2uint32(c) 45472 v0.AddArg(x) 45473 v.AddArg(v0) 45474 v.AddArg(mem) 45475 return true 45476 } 45477 // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem) 45478 // cond: isUint64PowerOfTwo(c) 45479 // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) 45480 for { 45481 off := v.AuxInt 45482 sym := v.Aux 45483 mem := v.Args[2] 45484 ptr := v.Args[0] 45485 v_1 := v.Args[1] 45486 if v_1.Op != OpAMD64TESTQconst { 45487 break 45488 } 45489 c := v_1.AuxInt 45490 x := v_1.Args[0] 45491 if !(isUint64PowerOfTwo(c)) { 45492 break 45493 } 45494 v.reset(OpAMD64SETBstore) 45495 v.AuxInt = off 45496 v.Aux = sym 45497 v.AddArg(ptr) 45498 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45499 v0.AuxInt = log2(c) 45500 v0.AddArg(x) 45501 v.AddArg(v0) 45502 v.AddArg(mem) 45503 return true 45504 } 45505 // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) 45506 // cond: isUint64PowerOfTwo(c) 45507 // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) 45508 for { 45509 off := v.AuxInt 45510 sym := v.Aux 45511 mem := v.Args[2] 45512 ptr := v.Args[0] 45513 v_1 := v.Args[1] 45514 if v_1.Op != OpAMD64TESTQ { 45515 break 45516 } 45517 x := v_1.Args[1] 45518 v_1_0 := v_1.Args[0] 45519 if v_1_0.Op != OpAMD64MOVQconst { 45520 break 45521 } 45522 c := v_1_0.AuxInt 45523 if !(isUint64PowerOfTwo(c)) { 45524 break 45525 } 45526 v.reset(OpAMD64SETBstore) 45527 v.AuxInt = off 45528 v.Aux = sym 45529 v.AddArg(ptr) 
45530 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45531 v0.AuxInt = log2(c) 45532 v0.AddArg(x) 45533 v.AddArg(v0) 45534 v.AddArg(mem) 45535 return true 45536 } 45537 // match: (SETNEstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) 45538 // cond: isUint64PowerOfTwo(c) 45539 // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem) 45540 for { 45541 off := v.AuxInt 45542 sym := v.Aux 45543 mem := v.Args[2] 45544 ptr := v.Args[0] 45545 v_1 := v.Args[1] 45546 if v_1.Op != OpAMD64TESTQ { 45547 break 45548 } 45549 _ = v_1.Args[1] 45550 x := v_1.Args[0] 45551 v_1_1 := v_1.Args[1] 45552 if v_1_1.Op != OpAMD64MOVQconst { 45553 break 45554 } 45555 c := v_1_1.AuxInt 45556 if !(isUint64PowerOfTwo(c)) { 45557 break 45558 } 45559 v.reset(OpAMD64SETBstore) 45560 v.AuxInt = off 45561 v.Aux = sym 45562 v.AddArg(ptr) 45563 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45564 v0.AuxInt = log2(c) 45565 v0.AddArg(x) 45566 v.AddArg(v0) 45567 v.AddArg(mem) 45568 return true 45569 } 45570 // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) 45571 // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem) 45572 for { 45573 off := v.AuxInt 45574 sym := v.Aux 45575 mem := v.Args[2] 45576 ptr := v.Args[0] 45577 v_1 := v.Args[1] 45578 if v_1.Op != OpAMD64CMPLconst || v_1.AuxInt != 1 { 45579 break 45580 } 45581 s := v_1.Args[0] 45582 if s.Op != OpAMD64ANDLconst || s.AuxInt != 1 { 45583 break 45584 } 45585 v.reset(OpAMD64SETEQstore) 45586 v.AuxInt = off 45587 v.Aux = sym 45588 v.AddArg(ptr) 45589 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 45590 v0.AuxInt = 0 45591 v0.AddArg(s) 45592 v.AddArg(v0) 45593 v.AddArg(mem) 45594 return true 45595 } 45596 // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) 45597 // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem) 45598 for { 45599 off := v.AuxInt 45600 sym := v.Aux 45601 mem := v.Args[2] 45602 ptr := v.Args[0] 45603 v_1 := v.Args[1] 45604 if v_1.Op != OpAMD64CMPQconst || v_1.AuxInt != 1 { 45605 break 45606 } 45607 s := v_1.Args[0] 45608 if s.Op != OpAMD64ANDQconst || s.AuxInt != 1 { 45609 break 45610 } 45611 v.reset(OpAMD64SETEQstore) 45612 v.AuxInt = off 45613 v.Aux = sym 45614 v.AddArg(ptr) 45615 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 45616 v0.AuxInt = 0 45617 v0.AddArg(s) 45618 v.AddArg(v0) 45619 v.AddArg(mem) 45620 return true 45621 } 45622 return false 45623 } 45624 func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool { 45625 b := v.Block 45626 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) 45627 // cond: z1==z2 45628 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) 45629 for { 45630 off := v.AuxInt 45631 sym := v.Aux 45632 mem := v.Args[2] 45633 ptr := v.Args[0] 45634 v_1 := v.Args[1] 45635 if v_1.Op != OpAMD64TESTQ { 45636 break 45637 } 45638 z2 := v_1.Args[1] 45639 z1 := v_1.Args[0] 45640 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { 45641 break 45642 } 45643 z1_0 := z1.Args[0] 45644 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { 45645 break 45646 } 45647 x := z1_0.Args[0] 45648 if !(z1 == z2) { 45649 break 45650 } 45651 v.reset(OpAMD64SETBstore) 45652 v.AuxInt = off 45653 v.Aux = sym 45654 v.AddArg(ptr) 45655 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45656 v0.AuxInt = 63 45657 v0.AddArg(x) 45658 v.AddArg(v0) 45659 v.AddArg(mem) 45660 return true 45661 } 45662 // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst 
[63] x))) mem) 45663 // cond: z1==z2 45664 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) 45665 for { 45666 off := v.AuxInt 45667 sym := v.Aux 45668 mem := v.Args[2] 45669 ptr := v.Args[0] 45670 v_1 := v.Args[1] 45671 if v_1.Op != OpAMD64TESTQ { 45672 break 45673 } 45674 _ = v_1.Args[1] 45675 z2 := v_1.Args[0] 45676 z1 := v_1.Args[1] 45677 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { 45678 break 45679 } 45680 z1_0 := z1.Args[0] 45681 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { 45682 break 45683 } 45684 x := z1_0.Args[0] 45685 if !(z1 == z2) { 45686 break 45687 } 45688 v.reset(OpAMD64SETBstore) 45689 v.AuxInt = off 45690 v.Aux = sym 45691 v.AddArg(ptr) 45692 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45693 v0.AuxInt = 63 45694 v0.AddArg(x) 45695 v.AddArg(v0) 45696 v.AddArg(mem) 45697 return true 45698 } 45699 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) 45700 // cond: z1==z2 45701 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) 45702 for { 45703 off := v.AuxInt 45704 sym := v.Aux 45705 mem := v.Args[2] 45706 ptr := v.Args[0] 45707 v_1 := v.Args[1] 45708 if v_1.Op != OpAMD64TESTL { 45709 break 45710 } 45711 z2 := v_1.Args[1] 45712 z1 := v_1.Args[0] 45713 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { 45714 break 45715 } 45716 z1_0 := z1.Args[0] 45717 if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { 45718 break 45719 } 45720 x := z1_0.Args[0] 45721 if !(z1 == z2) { 45722 break 45723 } 45724 v.reset(OpAMD64SETBstore) 45725 v.AuxInt = off 45726 v.Aux = sym 45727 v.AddArg(ptr) 45728 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 45729 v0.AuxInt = 31 45730 v0.AddArg(x) 45731 v.AddArg(v0) 45732 v.AddArg(mem) 45733 return true 45734 } 45735 // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem) 45736 // cond: z1==z2 45737 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) 45738 for { 45739 off := v.AuxInt 45740 sym := v.Aux 45741 mem := v.Args[2] 45742 ptr := v.Args[0] 45743 v_1 := v.Args[1] 45744 if v_1.Op != OpAMD64TESTL { 45745 break 45746 } 45747 _ = v_1.Args[1] 45748 z2 := v_1.Args[0] 45749 z1 := v_1.Args[1] 45750 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { 45751 break 45752 } 45753 z1_0 := z1.Args[0] 45754 if z1_0.Op != OpAMD64SHRLconst || z1_0.AuxInt != 31 { 45755 break 45756 } 45757 x := z1_0.Args[0] 45758 if !(z1 == z2) { 45759 break 45760 } 45761 v.reset(OpAMD64SETBstore) 45762 v.AuxInt = off 45763 v.Aux = sym 45764 v.AddArg(ptr) 45765 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 45766 v0.AuxInt = 31 45767 v0.AddArg(x) 45768 v.AddArg(v0) 45769 v.AddArg(mem) 45770 return true 45771 } 45772 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) 45773 // cond: z1==z2 45774 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem) 45775 for { 45776 off := v.AuxInt 45777 sym := v.Aux 45778 mem := v.Args[2] 45779 ptr := v.Args[0] 45780 v_1 := v.Args[1] 45781 if v_1.Op != OpAMD64TESTQ { 45782 break 45783 } 45784 z2 := v_1.Args[1] 45785 z1 := v_1.Args[0] 45786 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 45787 break 45788 } 45789 z1_0 := z1.Args[0] 45790 if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { 45791 break 45792 } 45793 x := z1_0.Args[0] 45794 if !(z1 == z2) { 45795 break 45796 } 45797 v.reset(OpAMD64SETBstore) 45798 v.AuxInt = off 45799 v.Aux = sym 45800 v.AddArg(ptr) 45801 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45802 v0.AuxInt 
= 0 45803 v0.AddArg(x) 45804 v.AddArg(v0) 45805 v.AddArg(mem) 45806 return true 45807 } 45808 // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem) 45809 // cond: z1==z2 45810 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem) 45811 for { 45812 off := v.AuxInt 45813 sym := v.Aux 45814 mem := v.Args[2] 45815 ptr := v.Args[0] 45816 v_1 := v.Args[1] 45817 if v_1.Op != OpAMD64TESTQ { 45818 break 45819 } 45820 _ = v_1.Args[1] 45821 z2 := v_1.Args[0] 45822 z1 := v_1.Args[1] 45823 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 45824 break 45825 } 45826 z1_0 := z1.Args[0] 45827 if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { 45828 break 45829 } 45830 x := z1_0.Args[0] 45831 if !(z1 == z2) { 45832 break 45833 } 45834 v.reset(OpAMD64SETBstore) 45835 v.AuxInt = off 45836 v.Aux = sym 45837 v.AddArg(ptr) 45838 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45839 v0.AuxInt = 0 45840 v0.AddArg(x) 45841 v.AddArg(v0) 45842 v.AddArg(mem) 45843 return true 45844 } 45845 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) 45846 // cond: z1==z2 45847 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem) 45848 for { 45849 off := v.AuxInt 45850 sym := v.Aux 45851 mem := v.Args[2] 45852 ptr := v.Args[0] 45853 v_1 := v.Args[1] 45854 if v_1.Op != OpAMD64TESTL { 45855 break 45856 } 45857 z2 := v_1.Args[1] 45858 z1 := v_1.Args[0] 45859 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 45860 break 45861 } 45862 z1_0 := z1.Args[0] 45863 if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { 45864 break 45865 } 45866 x := z1_0.Args[0] 45867 if !(z1 == z2) { 45868 break 45869 } 45870 v.reset(OpAMD64SETBstore) 45871 v.AuxInt = off 45872 v.Aux = sym 45873 v.AddArg(ptr) 45874 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 45875 v0.AuxInt = 0 45876 v0.AddArg(x) 45877 v.AddArg(v0) 45878 v.AddArg(mem) 45879 return true 45880 } 45881 // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem) 45882 // cond: z1==z2 45883 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem) 45884 for { 45885 off := v.AuxInt 45886 sym := v.Aux 45887 mem := v.Args[2] 45888 ptr := v.Args[0] 45889 v_1 := v.Args[1] 45890 if v_1.Op != OpAMD64TESTL { 45891 break 45892 } 45893 _ = v_1.Args[1] 45894 z2 := v_1.Args[0] 45895 z1 := v_1.Args[1] 45896 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 45897 break 45898 } 45899 z1_0 := z1.Args[0] 45900 if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { 45901 break 45902 } 45903 x := z1_0.Args[0] 45904 if !(z1 == z2) { 45905 break 45906 } 45907 v.reset(OpAMD64SETBstore) 45908 v.AuxInt = off 45909 v.Aux = sym 45910 v.AddArg(ptr) 45911 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 45912 v0.AuxInt = 0 45913 v0.AddArg(x) 45914 v.AddArg(v0) 45915 v.AddArg(mem) 45916 return true 45917 } 45918 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) 45919 // cond: z1==z2 45920 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) 45921 for { 45922 off := v.AuxInt 45923 sym := v.Aux 45924 mem := v.Args[2] 45925 ptr := v.Args[0] 45926 v_1 := v.Args[1] 45927 if v_1.Op != OpAMD64TESTQ { 45928 break 45929 } 45930 z2 := v_1.Args[1] 45931 z1 := v_1.Args[0] 45932 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 45933 break 45934 } 45935 x := z1.Args[0] 45936 if !(z1 == z2) { 45937 break 45938 } 45939 v.reset(OpAMD64SETBstore) 45940 v.AuxInt = off 45941 v.Aux = sym 45942 v.AddArg(ptr) 45943 v0 := b.NewValue0(v.Pos, 
OpAMD64BTQconst, types.TypeFlags) 45944 v0.AuxInt = 63 45945 v0.AddArg(x) 45946 v.AddArg(v0) 45947 v.AddArg(mem) 45948 return true 45949 } 45950 // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem) 45951 // cond: z1==z2 45952 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) 45953 for { 45954 off := v.AuxInt 45955 sym := v.Aux 45956 mem := v.Args[2] 45957 ptr := v.Args[0] 45958 v_1 := v.Args[1] 45959 if v_1.Op != OpAMD64TESTQ { 45960 break 45961 } 45962 _ = v_1.Args[1] 45963 z2 := v_1.Args[0] 45964 z1 := v_1.Args[1] 45965 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 45966 break 45967 } 45968 x := z1.Args[0] 45969 if !(z1 == z2) { 45970 break 45971 } 45972 v.reset(OpAMD64SETBstore) 45973 v.AuxInt = off 45974 v.Aux = sym 45975 v.AddArg(ptr) 45976 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 45977 v0.AuxInt = 63 45978 v0.AddArg(x) 45979 v.AddArg(v0) 45980 v.AddArg(mem) 45981 return true 45982 } 45983 return false 45984 } 45985 func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool { 45986 b := v.Block 45987 typ := &b.Func.Config.Types 45988 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) 45989 // cond: z1==z2 45990 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) 45991 for { 45992 off := v.AuxInt 45993 sym := v.Aux 45994 mem := v.Args[2] 45995 ptr := v.Args[0] 45996 v_1 := v.Args[1] 45997 if v_1.Op != OpAMD64TESTL { 45998 break 45999 } 46000 z2 := v_1.Args[1] 46001 z1 := v_1.Args[0] 46002 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 46003 break 46004 } 46005 x := z1.Args[0] 46006 if !(z1 == z2) { 46007 break 46008 } 46009 v.reset(OpAMD64SETBstore) 46010 v.AuxInt = off 46011 v.Aux = sym 46012 v.AddArg(ptr) 46013 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 46014 v0.AuxInt = 31 46015 v0.AddArg(x) 46016 v.AddArg(v0) 46017 v.AddArg(mem) 46018 return true 46019 } 46020 // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem) 46021 // cond: z1==z2 46022 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) 46023 for { 46024 off := v.AuxInt 46025 sym := v.Aux 46026 mem := v.Args[2] 46027 ptr := v.Args[0] 46028 v_1 := v.Args[1] 46029 if v_1.Op != OpAMD64TESTL { 46030 break 46031 } 46032 _ = v_1.Args[1] 46033 z2 := v_1.Args[0] 46034 z1 := v_1.Args[1] 46035 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 46036 break 46037 } 46038 x := z1.Args[0] 46039 if !(z1 == z2) { 46040 break 46041 } 46042 v.reset(OpAMD64SETBstore) 46043 v.AuxInt = off 46044 v.Aux = sym 46045 v.AddArg(ptr) 46046 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 46047 v0.AuxInt = 31 46048 v0.AddArg(x) 46049 v.AddArg(v0) 46050 v.AddArg(mem) 46051 return true 46052 } 46053 // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem) 46054 // result: (SETNEstore [off] {sym} ptr x mem) 46055 for { 46056 off := v.AuxInt 46057 sym := v.Aux 46058 mem := v.Args[2] 46059 ptr := v.Args[0] 46060 v_1 := v.Args[1] 46061 if v_1.Op != OpAMD64InvertFlags { 46062 break 46063 } 46064 x := v_1.Args[0] 46065 v.reset(OpAMD64SETNEstore) 46066 v.AuxInt = off 46067 v.Aux = sym 46068 v.AddArg(ptr) 46069 v.AddArg(x) 46070 v.AddArg(mem) 46071 return true 46072 } 46073 // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem) 46074 // cond: is32Bit(off1+off2) 46075 // result: (SETNEstore [off1+off2] {sym} base val mem) 46076 for { 46077 off1 := v.AuxInt 46078 sym := v.Aux 46079 mem := v.Args[2] 46080 v_0 := v.Args[0] 46081 if v_0.Op != OpAMD64ADDQconst { 46082 break 46083 } 46084 off2 := v_0.AuxInt 
46085 base := v_0.Args[0] 46086 val := v.Args[1] 46087 if !(is32Bit(off1 + off2)) { 46088 break 46089 } 46090 v.reset(OpAMD64SETNEstore) 46091 v.AuxInt = off1 + off2 46092 v.Aux = sym 46093 v.AddArg(base) 46094 v.AddArg(val) 46095 v.AddArg(mem) 46096 return true 46097 } 46098 // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 46099 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 46100 // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 46101 for { 46102 off1 := v.AuxInt 46103 sym1 := v.Aux 46104 mem := v.Args[2] 46105 v_0 := v.Args[0] 46106 if v_0.Op != OpAMD64LEAQ { 46107 break 46108 } 46109 off2 := v_0.AuxInt 46110 sym2 := v_0.Aux 46111 base := v_0.Args[0] 46112 val := v.Args[1] 46113 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 46114 break 46115 } 46116 v.reset(OpAMD64SETNEstore) 46117 v.AuxInt = off1 + off2 46118 v.Aux = mergeSym(sym1, sym2) 46119 v.AddArg(base) 46120 v.AddArg(val) 46121 v.AddArg(mem) 46122 return true 46123 } 46124 // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem) 46125 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem) 46126 for { 46127 off := v.AuxInt 46128 sym := v.Aux 46129 mem := v.Args[2] 46130 ptr := v.Args[0] 46131 v_1 := v.Args[1] 46132 if v_1.Op != OpAMD64FlagEQ { 46133 break 46134 } 46135 v.reset(OpAMD64MOVBstore) 46136 v.AuxInt = off 46137 v.Aux = sym 46138 v.AddArg(ptr) 46139 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 46140 v0.AuxInt = 0 46141 v.AddArg(v0) 46142 v.AddArg(mem) 46143 return true 46144 } 46145 // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) 46146 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 46147 for { 46148 off := v.AuxInt 46149 sym := v.Aux 46150 mem := v.Args[2] 46151 ptr := v.Args[0] 46152 v_1 := v.Args[1] 46153 if v_1.Op != OpAMD64FlagLT_ULT { 46154 break 46155 } 46156 v.reset(OpAMD64MOVBstore) 46157 v.AuxInt = off 46158 v.Aux = sym 46159 v.AddArg(ptr) 46160 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 46161 v0.AuxInt = 1 46162 v.AddArg(v0) 46163 v.AddArg(mem) 46164 return true 46165 } 46166 // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) 46167 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 46168 for { 46169 off := v.AuxInt 46170 sym := v.Aux 46171 mem := v.Args[2] 46172 ptr := v.Args[0] 46173 v_1 := v.Args[1] 46174 if v_1.Op != OpAMD64FlagLT_UGT { 46175 break 46176 } 46177 v.reset(OpAMD64MOVBstore) 46178 v.AuxInt = off 46179 v.Aux = sym 46180 v.AddArg(ptr) 46181 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 46182 v0.AuxInt = 1 46183 v.AddArg(v0) 46184 v.AddArg(mem) 46185 return true 46186 } 46187 // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) 46188 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 46189 for { 46190 off := v.AuxInt 46191 sym := v.Aux 46192 mem := v.Args[2] 46193 ptr := v.Args[0] 46194 v_1 := v.Args[1] 46195 if v_1.Op != OpAMD64FlagGT_ULT { 46196 break 46197 } 46198 v.reset(OpAMD64MOVBstore) 46199 v.AuxInt = off 46200 v.Aux = sym 46201 v.AddArg(ptr) 46202 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 46203 v0.AuxInt = 1 46204 v.AddArg(v0) 46205 v.AddArg(mem) 46206 return true 46207 } 46208 // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) 46209 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem) 46210 for { 46211 off := v.AuxInt 46212 sym := v.Aux 46213 mem := v.Args[2] 46214 ptr := v.Args[0] 46215 v_1 := v.Args[1] 46216 if v_1.Op != OpAMD64FlagGT_UGT { 46217 break 46218 } 46219 
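// Note: SETNE is true for every flag state except "equal", so once the flags
// argument is a known constant the conditional store collapses to a plain byte
// store of a constant: 0 for FlagEQ, 1 for each of the four FlagLT_*/FlagGT_*
// states, as the surrounding Flag* rules spell out case by case.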
v.reset(OpAMD64MOVBstore) 46220 v.AuxInt = off 46221 v.Aux = sym 46222 v.AddArg(ptr) 46223 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) 46224 v0.AuxInt = 1 46225 v.AddArg(v0) 46226 v.AddArg(mem) 46227 return true 46228 } 46229 return false 46230 } 46231 func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { 46232 b := v.Block 46233 // match: (SHLL x (MOVQconst [c])) 46234 // result: (SHLLconst [c&31] x) 46235 for { 46236 _ = v.Args[1] 46237 x := v.Args[0] 46238 v_1 := v.Args[1] 46239 if v_1.Op != OpAMD64MOVQconst { 46240 break 46241 } 46242 c := v_1.AuxInt 46243 v.reset(OpAMD64SHLLconst) 46244 v.AuxInt = c & 31 46245 v.AddArg(x) 46246 return true 46247 } 46248 // match: (SHLL x (MOVLconst [c])) 46249 // result: (SHLLconst [c&31] x) 46250 for { 46251 _ = v.Args[1] 46252 x := v.Args[0] 46253 v_1 := v.Args[1] 46254 if v_1.Op != OpAMD64MOVLconst { 46255 break 46256 } 46257 c := v_1.AuxInt 46258 v.reset(OpAMD64SHLLconst) 46259 v.AuxInt = c & 31 46260 v.AddArg(x) 46261 return true 46262 } 46263 // match: (SHLL x (ADDQconst [c] y)) 46264 // cond: c & 31 == 0 46265 // result: (SHLL x y) 46266 for { 46267 _ = v.Args[1] 46268 x := v.Args[0] 46269 v_1 := v.Args[1] 46270 if v_1.Op != OpAMD64ADDQconst { 46271 break 46272 } 46273 c := v_1.AuxInt 46274 y := v_1.Args[0] 46275 if !(c&31 == 0) { 46276 break 46277 } 46278 v.reset(OpAMD64SHLL) 46279 v.AddArg(x) 46280 v.AddArg(y) 46281 return true 46282 } 46283 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y))) 46284 // cond: c & 31 == 0 46285 // result: (SHLL x (NEGQ <t> y)) 46286 for { 46287 _ = v.Args[1] 46288 x := v.Args[0] 46289 v_1 := v.Args[1] 46290 if v_1.Op != OpAMD64NEGQ { 46291 break 46292 } 46293 t := v_1.Type 46294 v_1_0 := v_1.Args[0] 46295 if v_1_0.Op != OpAMD64ADDQconst { 46296 break 46297 } 46298 c := v_1_0.AuxInt 46299 y := v_1_0.Args[0] 46300 if !(c&31 == 0) { 46301 break 46302 } 46303 v.reset(OpAMD64SHLL) 46304 v.AddArg(x) 46305 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 46306 v0.AddArg(y) 46307 v.AddArg(v0) 46308 return true 46309 } 46310 // match: (SHLL x (ANDQconst [c] y)) 46311 // cond: c & 31 == 31 46312 // result: (SHLL x y) 46313 for { 46314 _ = v.Args[1] 46315 x := v.Args[0] 46316 v_1 := v.Args[1] 46317 if v_1.Op != OpAMD64ANDQconst { 46318 break 46319 } 46320 c := v_1.AuxInt 46321 y := v_1.Args[0] 46322 if !(c&31 == 31) { 46323 break 46324 } 46325 v.reset(OpAMD64SHLL) 46326 v.AddArg(x) 46327 v.AddArg(y) 46328 return true 46329 } 46330 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y))) 46331 // cond: c & 31 == 31 46332 // result: (SHLL x (NEGQ <t> y)) 46333 for { 46334 _ = v.Args[1] 46335 x := v.Args[0] 46336 v_1 := v.Args[1] 46337 if v_1.Op != OpAMD64NEGQ { 46338 break 46339 } 46340 t := v_1.Type 46341 v_1_0 := v_1.Args[0] 46342 if v_1_0.Op != OpAMD64ANDQconst { 46343 break 46344 } 46345 c := v_1_0.AuxInt 46346 y := v_1_0.Args[0] 46347 if !(c&31 == 31) { 46348 break 46349 } 46350 v.reset(OpAMD64SHLL) 46351 v.AddArg(x) 46352 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 46353 v0.AddArg(y) 46354 v.AddArg(v0) 46355 return true 46356 } 46357 // match: (SHLL x (ADDLconst [c] y)) 46358 // cond: c & 31 == 0 46359 // result: (SHLL x y) 46360 for { 46361 _ = v.Args[1] 46362 x := v.Args[0] 46363 v_1 := v.Args[1] 46364 if v_1.Op != OpAMD64ADDLconst { 46365 break 46366 } 46367 c := v_1.AuxInt 46368 y := v_1.Args[0] 46369 if !(c&31 == 0) { 46370 break 46371 } 46372 v.reset(OpAMD64SHLL) 46373 v.AddArg(x) 46374 v.AddArg(y) 46375 return true 46376 } 46377 // match: (SHLL x (NEGL <t> (ADDLconst [c] y))) 46378 // cond: c & 31 == 0 46379 // result: (SHLL 
x (NEGL <t> y)) 46380 for { 46381 _ = v.Args[1] 46382 x := v.Args[0] 46383 v_1 := v.Args[1] 46384 if v_1.Op != OpAMD64NEGL { 46385 break 46386 } 46387 t := v_1.Type 46388 v_1_0 := v_1.Args[0] 46389 if v_1_0.Op != OpAMD64ADDLconst { 46390 break 46391 } 46392 c := v_1_0.AuxInt 46393 y := v_1_0.Args[0] 46394 if !(c&31 == 0) { 46395 break 46396 } 46397 v.reset(OpAMD64SHLL) 46398 v.AddArg(x) 46399 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 46400 v0.AddArg(y) 46401 v.AddArg(v0) 46402 return true 46403 } 46404 // match: (SHLL x (ANDLconst [c] y)) 46405 // cond: c & 31 == 31 46406 // result: (SHLL x y) 46407 for { 46408 _ = v.Args[1] 46409 x := v.Args[0] 46410 v_1 := v.Args[1] 46411 if v_1.Op != OpAMD64ANDLconst { 46412 break 46413 } 46414 c := v_1.AuxInt 46415 y := v_1.Args[0] 46416 if !(c&31 == 31) { 46417 break 46418 } 46419 v.reset(OpAMD64SHLL) 46420 v.AddArg(x) 46421 v.AddArg(y) 46422 return true 46423 } 46424 // match: (SHLL x (NEGL <t> (ANDLconst [c] y))) 46425 // cond: c & 31 == 31 46426 // result: (SHLL x (NEGL <t> y)) 46427 for { 46428 _ = v.Args[1] 46429 x := v.Args[0] 46430 v_1 := v.Args[1] 46431 if v_1.Op != OpAMD64NEGL { 46432 break 46433 } 46434 t := v_1.Type 46435 v_1_0 := v_1.Args[0] 46436 if v_1_0.Op != OpAMD64ANDLconst { 46437 break 46438 } 46439 c := v_1_0.AuxInt 46440 y := v_1_0.Args[0] 46441 if !(c&31 == 31) { 46442 break 46443 } 46444 v.reset(OpAMD64SHLL) 46445 v.AddArg(x) 46446 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 46447 v0.AddArg(y) 46448 v.AddArg(v0) 46449 return true 46450 } 46451 return false 46452 } 46453 func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { 46454 // match: (SHLLconst [1] (SHRLconst [1] x)) 46455 // result: (BTRLconst [0] x) 46456 for { 46457 if v.AuxInt != 1 { 46458 break 46459 } 46460 v_0 := v.Args[0] 46461 if v_0.Op != OpAMD64SHRLconst || v_0.AuxInt != 1 { 46462 break 46463 } 46464 x := v_0.Args[0] 46465 v.reset(OpAMD64BTRLconst) 46466 v.AuxInt = 0 46467 v.AddArg(x) 46468 return true 46469 } 46470 // match: (SHLLconst x [0]) 46471 // result: x 46472 for { 46473 if v.AuxInt != 0 { 46474 break 46475 } 46476 x := v.Args[0] 46477 v.reset(OpCopy) 46478 v.Type = x.Type 46479 v.AddArg(x) 46480 return true 46481 } 46482 return false 46483 } 46484 func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { 46485 b := v.Block 46486 // match: (SHLQ x (MOVQconst [c])) 46487 // result: (SHLQconst [c&63] x) 46488 for { 46489 _ = v.Args[1] 46490 x := v.Args[0] 46491 v_1 := v.Args[1] 46492 if v_1.Op != OpAMD64MOVQconst { 46493 break 46494 } 46495 c := v_1.AuxInt 46496 v.reset(OpAMD64SHLQconst) 46497 v.AuxInt = c & 63 46498 v.AddArg(x) 46499 return true 46500 } 46501 // match: (SHLQ x (MOVLconst [c])) 46502 // result: (SHLQconst [c&63] x) 46503 for { 46504 _ = v.Args[1] 46505 x := v.Args[0] 46506 v_1 := v.Args[1] 46507 if v_1.Op != OpAMD64MOVLconst { 46508 break 46509 } 46510 c := v_1.AuxInt 46511 v.reset(OpAMD64SHLQconst) 46512 v.AuxInt = c & 63 46513 v.AddArg(x) 46514 return true 46515 } 46516 // match: (SHLQ x (ADDQconst [c] y)) 46517 // cond: c & 63 == 0 46518 // result: (SHLQ x y) 46519 for { 46520 _ = v.Args[1] 46521 x := v.Args[0] 46522 v_1 := v.Args[1] 46523 if v_1.Op != OpAMD64ADDQconst { 46524 break 46525 } 46526 c := v_1.AuxInt 46527 y := v_1.Args[0] 46528 if !(c&63 == 0) { 46529 break 46530 } 46531 v.reset(OpAMD64SHLQ) 46532 v.AddArg(x) 46533 v.AddArg(y) 46534 return true 46535 } 46536 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y))) 46537 // cond: c & 63 == 0 46538 // result: (SHLQ x (NEGQ <t> y)) 46539 for { 46540 _ = v.Args[1] 46541 x := v.Args[0] 
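// Note on the shift rules in this stretch: x86 SHL/SHR ignore all but the low
// 5 bits of the count for 32-bit operands and the low 6 bits for 64-bit ones.
// That is why, schematically,
//
//	(SHLQ x (ADDQconst [c] y))  with  c&63 == 0
//
// may drop the ADDQconst entirely: adding a multiple of 64 never changes the
// masked count. The ANDQconst cases with c&63 == 63 are the dual, since AND
// with all ones in the low 6 bits is a no-op for the count, and the NEGQ/NEGL
// wrappers push the same reasoning through a negation.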
46542 v_1 := v.Args[1] 46543 if v_1.Op != OpAMD64NEGQ { 46544 break 46545 } 46546 t := v_1.Type 46547 v_1_0 := v_1.Args[0] 46548 if v_1_0.Op != OpAMD64ADDQconst { 46549 break 46550 } 46551 c := v_1_0.AuxInt 46552 y := v_1_0.Args[0] 46553 if !(c&63 == 0) { 46554 break 46555 } 46556 v.reset(OpAMD64SHLQ) 46557 v.AddArg(x) 46558 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 46559 v0.AddArg(y) 46560 v.AddArg(v0) 46561 return true 46562 } 46563 // match: (SHLQ x (ANDQconst [c] y)) 46564 // cond: c & 63 == 63 46565 // result: (SHLQ x y) 46566 for { 46567 _ = v.Args[1] 46568 x := v.Args[0] 46569 v_1 := v.Args[1] 46570 if v_1.Op != OpAMD64ANDQconst { 46571 break 46572 } 46573 c := v_1.AuxInt 46574 y := v_1.Args[0] 46575 if !(c&63 == 63) { 46576 break 46577 } 46578 v.reset(OpAMD64SHLQ) 46579 v.AddArg(x) 46580 v.AddArg(y) 46581 return true 46582 } 46583 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y))) 46584 // cond: c & 63 == 63 46585 // result: (SHLQ x (NEGQ <t> y)) 46586 for { 46587 _ = v.Args[1] 46588 x := v.Args[0] 46589 v_1 := v.Args[1] 46590 if v_1.Op != OpAMD64NEGQ { 46591 break 46592 } 46593 t := v_1.Type 46594 v_1_0 := v_1.Args[0] 46595 if v_1_0.Op != OpAMD64ANDQconst { 46596 break 46597 } 46598 c := v_1_0.AuxInt 46599 y := v_1_0.Args[0] 46600 if !(c&63 == 63) { 46601 break 46602 } 46603 v.reset(OpAMD64SHLQ) 46604 v.AddArg(x) 46605 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 46606 v0.AddArg(y) 46607 v.AddArg(v0) 46608 return true 46609 } 46610 // match: (SHLQ x (ADDLconst [c] y)) 46611 // cond: c & 63 == 0 46612 // result: (SHLQ x y) 46613 for { 46614 _ = v.Args[1] 46615 x := v.Args[0] 46616 v_1 := v.Args[1] 46617 if v_1.Op != OpAMD64ADDLconst { 46618 break 46619 } 46620 c := v_1.AuxInt 46621 y := v_1.Args[0] 46622 if !(c&63 == 0) { 46623 break 46624 } 46625 v.reset(OpAMD64SHLQ) 46626 v.AddArg(x) 46627 v.AddArg(y) 46628 return true 46629 } 46630 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y))) 46631 // cond: c & 63 == 0 46632 // result: (SHLQ x (NEGL <t> y)) 46633 for { 46634 _ = v.Args[1] 46635 x := v.Args[0] 46636 v_1 := v.Args[1] 46637 if v_1.Op != OpAMD64NEGL { 46638 break 46639 } 46640 t := v_1.Type 46641 v_1_0 := v_1.Args[0] 46642 if v_1_0.Op != OpAMD64ADDLconst { 46643 break 46644 } 46645 c := v_1_0.AuxInt 46646 y := v_1_0.Args[0] 46647 if !(c&63 == 0) { 46648 break 46649 } 46650 v.reset(OpAMD64SHLQ) 46651 v.AddArg(x) 46652 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 46653 v0.AddArg(y) 46654 v.AddArg(v0) 46655 return true 46656 } 46657 // match: (SHLQ x (ANDLconst [c] y)) 46658 // cond: c & 63 == 63 46659 // result: (SHLQ x y) 46660 for { 46661 _ = v.Args[1] 46662 x := v.Args[0] 46663 v_1 := v.Args[1] 46664 if v_1.Op != OpAMD64ANDLconst { 46665 break 46666 } 46667 c := v_1.AuxInt 46668 y := v_1.Args[0] 46669 if !(c&63 == 63) { 46670 break 46671 } 46672 v.reset(OpAMD64SHLQ) 46673 v.AddArg(x) 46674 v.AddArg(y) 46675 return true 46676 } 46677 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y))) 46678 // cond: c & 63 == 63 46679 // result: (SHLQ x (NEGL <t> y)) 46680 for { 46681 _ = v.Args[1] 46682 x := v.Args[0] 46683 v_1 := v.Args[1] 46684 if v_1.Op != OpAMD64NEGL { 46685 break 46686 } 46687 t := v_1.Type 46688 v_1_0 := v_1.Args[0] 46689 if v_1_0.Op != OpAMD64ANDLconst { 46690 break 46691 } 46692 c := v_1_0.AuxInt 46693 y := v_1_0.Args[0] 46694 if !(c&63 == 63) { 46695 break 46696 } 46697 v.reset(OpAMD64SHLQ) 46698 v.AddArg(x) 46699 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 46700 v0.AddArg(y) 46701 v.AddArg(v0) 46702 return true 46703 } 46704 return false 46705 } 46706 func 
rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { 46707 // match: (SHLQconst [1] (SHRQconst [1] x)) 46708 // result: (BTRQconst [0] x) 46709 for { 46710 if v.AuxInt != 1 { 46711 break 46712 } 46713 v_0 := v.Args[0] 46714 if v_0.Op != OpAMD64SHRQconst || v_0.AuxInt != 1 { 46715 break 46716 } 46717 x := v_0.Args[0] 46718 v.reset(OpAMD64BTRQconst) 46719 v.AuxInt = 0 46720 v.AddArg(x) 46721 return true 46722 } 46723 // match: (SHLQconst x [0]) 46724 // result: x 46725 for { 46726 if v.AuxInt != 0 { 46727 break 46728 } 46729 x := v.Args[0] 46730 v.reset(OpCopy) 46731 v.Type = x.Type 46732 v.AddArg(x) 46733 return true 46734 } 46735 return false 46736 } 46737 func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { 46738 // match: (SHRB x (MOVQconst [c])) 46739 // cond: c&31 < 8 46740 // result: (SHRBconst [c&31] x) 46741 for { 46742 _ = v.Args[1] 46743 x := v.Args[0] 46744 v_1 := v.Args[1] 46745 if v_1.Op != OpAMD64MOVQconst { 46746 break 46747 } 46748 c := v_1.AuxInt 46749 if !(c&31 < 8) { 46750 break 46751 } 46752 v.reset(OpAMD64SHRBconst) 46753 v.AuxInt = c & 31 46754 v.AddArg(x) 46755 return true 46756 } 46757 // match: (SHRB x (MOVLconst [c])) 46758 // cond: c&31 < 8 46759 // result: (SHRBconst [c&31] x) 46760 for { 46761 _ = v.Args[1] 46762 x := v.Args[0] 46763 v_1 := v.Args[1] 46764 if v_1.Op != OpAMD64MOVLconst { 46765 break 46766 } 46767 c := v_1.AuxInt 46768 if !(c&31 < 8) { 46769 break 46770 } 46771 v.reset(OpAMD64SHRBconst) 46772 v.AuxInt = c & 31 46773 v.AddArg(x) 46774 return true 46775 } 46776 // match: (SHRB _ (MOVQconst [c])) 46777 // cond: c&31 >= 8 46778 // result: (MOVLconst [0]) 46779 for { 46780 _ = v.Args[1] 46781 v_1 := v.Args[1] 46782 if v_1.Op != OpAMD64MOVQconst { 46783 break 46784 } 46785 c := v_1.AuxInt 46786 if !(c&31 >= 8) { 46787 break 46788 } 46789 v.reset(OpAMD64MOVLconst) 46790 v.AuxInt = 0 46791 return true 46792 } 46793 // match: (SHRB _ (MOVLconst [c])) 46794 // cond: c&31 >= 8 46795 // result: (MOVLconst [0]) 46796 for { 46797 _ = v.Args[1] 46798 v_1 := v.Args[1] 46799 if v_1.Op != OpAMD64MOVLconst { 46800 break 46801 } 46802 c := v_1.AuxInt 46803 if !(c&31 >= 8) { 46804 break 46805 } 46806 v.reset(OpAMD64MOVLconst) 46807 v.AuxInt = 0 46808 return true 46809 } 46810 return false 46811 } 46812 func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { 46813 // match: (SHRBconst x [0]) 46814 // result: x 46815 for { 46816 if v.AuxInt != 0 { 46817 break 46818 } 46819 x := v.Args[0] 46820 v.reset(OpCopy) 46821 v.Type = x.Type 46822 v.AddArg(x) 46823 return true 46824 } 46825 return false 46826 } 46827 func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { 46828 b := v.Block 46829 // match: (SHRL x (MOVQconst [c])) 46830 // result: (SHRLconst [c&31] x) 46831 for { 46832 _ = v.Args[1] 46833 x := v.Args[0] 46834 v_1 := v.Args[1] 46835 if v_1.Op != OpAMD64MOVQconst { 46836 break 46837 } 46838 c := v_1.AuxInt 46839 v.reset(OpAMD64SHRLconst) 46840 v.AuxInt = c & 31 46841 v.AddArg(x) 46842 return true 46843 } 46844 // match: (SHRL x (MOVLconst [c])) 46845 // result: (SHRLconst [c&31] x) 46846 for { 46847 _ = v.Args[1] 46848 x := v.Args[0] 46849 v_1 := v.Args[1] 46850 if v_1.Op != OpAMD64MOVLconst { 46851 break 46852 } 46853 c := v_1.AuxInt 46854 v.reset(OpAMD64SHRLconst) 46855 v.AuxInt = c & 31 46856 v.AddArg(x) 46857 return true 46858 } 46859 // match: (SHRL x (ADDQconst [c] y)) 46860 // cond: c & 31 == 0 46861 // result: (SHRL x y) 46862 for { 46863 _ = v.Args[1] 46864 x := v.Args[0] 46865 v_1 := v.Args[1] 46866 if v_1.Op != OpAMD64ADDQconst { 46867 break 46868 } 
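// Note on the SHRB rules above: a byte has only 8 bits, so when the masked
// count c&31 is 8 or more the result is provably zero and the whole shift
// folds to (MOVLconst [0]); below that bound it becomes the immediate form
// (SHRBconst [c&31] x). The SHRW rules further down apply the same split at 16.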
46869 c := v_1.AuxInt 46870 y := v_1.Args[0] 46871 if !(c&31 == 0) { 46872 break 46873 } 46874 v.reset(OpAMD64SHRL) 46875 v.AddArg(x) 46876 v.AddArg(y) 46877 return true 46878 } 46879 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y))) 46880 // cond: c & 31 == 0 46881 // result: (SHRL x (NEGQ <t> y)) 46882 for { 46883 _ = v.Args[1] 46884 x := v.Args[0] 46885 v_1 := v.Args[1] 46886 if v_1.Op != OpAMD64NEGQ { 46887 break 46888 } 46889 t := v_1.Type 46890 v_1_0 := v_1.Args[0] 46891 if v_1_0.Op != OpAMD64ADDQconst { 46892 break 46893 } 46894 c := v_1_0.AuxInt 46895 y := v_1_0.Args[0] 46896 if !(c&31 == 0) { 46897 break 46898 } 46899 v.reset(OpAMD64SHRL) 46900 v.AddArg(x) 46901 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 46902 v0.AddArg(y) 46903 v.AddArg(v0) 46904 return true 46905 } 46906 // match: (SHRL x (ANDQconst [c] y)) 46907 // cond: c & 31 == 31 46908 // result: (SHRL x y) 46909 for { 46910 _ = v.Args[1] 46911 x := v.Args[0] 46912 v_1 := v.Args[1] 46913 if v_1.Op != OpAMD64ANDQconst { 46914 break 46915 } 46916 c := v_1.AuxInt 46917 y := v_1.Args[0] 46918 if !(c&31 == 31) { 46919 break 46920 } 46921 v.reset(OpAMD64SHRL) 46922 v.AddArg(x) 46923 v.AddArg(y) 46924 return true 46925 } 46926 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y))) 46927 // cond: c & 31 == 31 46928 // result: (SHRL x (NEGQ <t> y)) 46929 for { 46930 _ = v.Args[1] 46931 x := v.Args[0] 46932 v_1 := v.Args[1] 46933 if v_1.Op != OpAMD64NEGQ { 46934 break 46935 } 46936 t := v_1.Type 46937 v_1_0 := v_1.Args[0] 46938 if v_1_0.Op != OpAMD64ANDQconst { 46939 break 46940 } 46941 c := v_1_0.AuxInt 46942 y := v_1_0.Args[0] 46943 if !(c&31 == 31) { 46944 break 46945 } 46946 v.reset(OpAMD64SHRL) 46947 v.AddArg(x) 46948 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 46949 v0.AddArg(y) 46950 v.AddArg(v0) 46951 return true 46952 } 46953 // match: (SHRL x (ADDLconst [c] y)) 46954 // cond: c & 31 == 0 46955 // result: (SHRL x y) 46956 for { 46957 _ = v.Args[1] 46958 x := v.Args[0] 46959 v_1 := v.Args[1] 46960 if v_1.Op != OpAMD64ADDLconst { 46961 break 46962 } 46963 c := v_1.AuxInt 46964 y := v_1.Args[0] 46965 if !(c&31 == 0) { 46966 break 46967 } 46968 v.reset(OpAMD64SHRL) 46969 v.AddArg(x) 46970 v.AddArg(y) 46971 return true 46972 } 46973 // match: (SHRL x (NEGL <t> (ADDLconst [c] y))) 46974 // cond: c & 31 == 0 46975 // result: (SHRL x (NEGL <t> y)) 46976 for { 46977 _ = v.Args[1] 46978 x := v.Args[0] 46979 v_1 := v.Args[1] 46980 if v_1.Op != OpAMD64NEGL { 46981 break 46982 } 46983 t := v_1.Type 46984 v_1_0 := v_1.Args[0] 46985 if v_1_0.Op != OpAMD64ADDLconst { 46986 break 46987 } 46988 c := v_1_0.AuxInt 46989 y := v_1_0.Args[0] 46990 if !(c&31 == 0) { 46991 break 46992 } 46993 v.reset(OpAMD64SHRL) 46994 v.AddArg(x) 46995 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 46996 v0.AddArg(y) 46997 v.AddArg(v0) 46998 return true 46999 } 47000 // match: (SHRL x (ANDLconst [c] y)) 47001 // cond: c & 31 == 31 47002 // result: (SHRL x y) 47003 for { 47004 _ = v.Args[1] 47005 x := v.Args[0] 47006 v_1 := v.Args[1] 47007 if v_1.Op != OpAMD64ANDLconst { 47008 break 47009 } 47010 c := v_1.AuxInt 47011 y := v_1.Args[0] 47012 if !(c&31 == 31) { 47013 break 47014 } 47015 v.reset(OpAMD64SHRL) 47016 v.AddArg(x) 47017 v.AddArg(y) 47018 return true 47019 } 47020 // match: (SHRL x (NEGL <t> (ANDLconst [c] y))) 47021 // cond: c & 31 == 31 47022 // result: (SHRL x (NEGL <t> y)) 47023 for { 47024 _ = v.Args[1] 47025 x := v.Args[0] 47026 v_1 := v.Args[1] 47027 if v_1.Op != OpAMD64NEGL { 47028 break 47029 } 47030 t := v_1.Type 47031 v_1_0 := v_1.Args[0] 47032 if v_1_0.Op != 
OpAMD64ANDLconst { 47033 break 47034 } 47035 c := v_1_0.AuxInt 47036 y := v_1_0.Args[0] 47037 if !(c&31 == 31) { 47038 break 47039 } 47040 v.reset(OpAMD64SHRL) 47041 v.AddArg(x) 47042 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 47043 v0.AddArg(y) 47044 v.AddArg(v0) 47045 return true 47046 } 47047 return false 47048 } 47049 func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { 47050 // match: (SHRLconst [1] (SHLLconst [1] x)) 47051 // result: (BTRLconst [31] x) 47052 for { 47053 if v.AuxInt != 1 { 47054 break 47055 } 47056 v_0 := v.Args[0] 47057 if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 { 47058 break 47059 } 47060 x := v_0.Args[0] 47061 v.reset(OpAMD64BTRLconst) 47062 v.AuxInt = 31 47063 v.AddArg(x) 47064 return true 47065 } 47066 // match: (SHRLconst x [0]) 47067 // result: x 47068 for { 47069 if v.AuxInt != 0 { 47070 break 47071 } 47072 x := v.Args[0] 47073 v.reset(OpCopy) 47074 v.Type = x.Type 47075 v.AddArg(x) 47076 return true 47077 } 47078 return false 47079 } 47080 func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { 47081 b := v.Block 47082 // match: (SHRQ x (MOVQconst [c])) 47083 // result: (SHRQconst [c&63] x) 47084 for { 47085 _ = v.Args[1] 47086 x := v.Args[0] 47087 v_1 := v.Args[1] 47088 if v_1.Op != OpAMD64MOVQconst { 47089 break 47090 } 47091 c := v_1.AuxInt 47092 v.reset(OpAMD64SHRQconst) 47093 v.AuxInt = c & 63 47094 v.AddArg(x) 47095 return true 47096 } 47097 // match: (SHRQ x (MOVLconst [c])) 47098 // result: (SHRQconst [c&63] x) 47099 for { 47100 _ = v.Args[1] 47101 x := v.Args[0] 47102 v_1 := v.Args[1] 47103 if v_1.Op != OpAMD64MOVLconst { 47104 break 47105 } 47106 c := v_1.AuxInt 47107 v.reset(OpAMD64SHRQconst) 47108 v.AuxInt = c & 63 47109 v.AddArg(x) 47110 return true 47111 } 47112 // match: (SHRQ x (ADDQconst [c] y)) 47113 // cond: c & 63 == 0 47114 // result: (SHRQ x y) 47115 for { 47116 _ = v.Args[1] 47117 x := v.Args[0] 47118 v_1 := v.Args[1] 47119 if v_1.Op != OpAMD64ADDQconst { 47120 break 47121 } 47122 c := v_1.AuxInt 47123 y := v_1.Args[0] 47124 if !(c&63 == 0) { 47125 break 47126 } 47127 v.reset(OpAMD64SHRQ) 47128 v.AddArg(x) 47129 v.AddArg(y) 47130 return true 47131 } 47132 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y))) 47133 // cond: c & 63 == 0 47134 // result: (SHRQ x (NEGQ <t> y)) 47135 for { 47136 _ = v.Args[1] 47137 x := v.Args[0] 47138 v_1 := v.Args[1] 47139 if v_1.Op != OpAMD64NEGQ { 47140 break 47141 } 47142 t := v_1.Type 47143 v_1_0 := v_1.Args[0] 47144 if v_1_0.Op != OpAMD64ADDQconst { 47145 break 47146 } 47147 c := v_1_0.AuxInt 47148 y := v_1_0.Args[0] 47149 if !(c&63 == 0) { 47150 break 47151 } 47152 v.reset(OpAMD64SHRQ) 47153 v.AddArg(x) 47154 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 47155 v0.AddArg(y) 47156 v.AddArg(v0) 47157 return true 47158 } 47159 // match: (SHRQ x (ANDQconst [c] y)) 47160 // cond: c & 63 == 63 47161 // result: (SHRQ x y) 47162 for { 47163 _ = v.Args[1] 47164 x := v.Args[0] 47165 v_1 := v.Args[1] 47166 if v_1.Op != OpAMD64ANDQconst { 47167 break 47168 } 47169 c := v_1.AuxInt 47170 y := v_1.Args[0] 47171 if !(c&63 == 63) { 47172 break 47173 } 47174 v.reset(OpAMD64SHRQ) 47175 v.AddArg(x) 47176 v.AddArg(y) 47177 return true 47178 } 47179 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y))) 47180 // cond: c & 63 == 63 47181 // result: (SHRQ x (NEGQ <t> y)) 47182 for { 47183 _ = v.Args[1] 47184 x := v.Args[0] 47185 v_1 := v.Args[1] 47186 if v_1.Op != OpAMD64NEGQ { 47187 break 47188 } 47189 t := v_1.Type 47190 v_1_0 := v_1.Args[0] 47191 if v_1_0.Op != OpAMD64ANDQconst { 47192 break 47193 } 47194 c := v_1_0.AuxInt 47195 y 
:= v_1_0.Args[0] 47196 if !(c&63 == 63) { 47197 break 47198 } 47199 v.reset(OpAMD64SHRQ) 47200 v.AddArg(x) 47201 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 47202 v0.AddArg(y) 47203 v.AddArg(v0) 47204 return true 47205 } 47206 // match: (SHRQ x (ADDLconst [c] y)) 47207 // cond: c & 63 == 0 47208 // result: (SHRQ x y) 47209 for { 47210 _ = v.Args[1] 47211 x := v.Args[0] 47212 v_1 := v.Args[1] 47213 if v_1.Op != OpAMD64ADDLconst { 47214 break 47215 } 47216 c := v_1.AuxInt 47217 y := v_1.Args[0] 47218 if !(c&63 == 0) { 47219 break 47220 } 47221 v.reset(OpAMD64SHRQ) 47222 v.AddArg(x) 47223 v.AddArg(y) 47224 return true 47225 } 47226 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y))) 47227 // cond: c & 63 == 0 47228 // result: (SHRQ x (NEGL <t> y)) 47229 for { 47230 _ = v.Args[1] 47231 x := v.Args[0] 47232 v_1 := v.Args[1] 47233 if v_1.Op != OpAMD64NEGL { 47234 break 47235 } 47236 t := v_1.Type 47237 v_1_0 := v_1.Args[0] 47238 if v_1_0.Op != OpAMD64ADDLconst { 47239 break 47240 } 47241 c := v_1_0.AuxInt 47242 y := v_1_0.Args[0] 47243 if !(c&63 == 0) { 47244 break 47245 } 47246 v.reset(OpAMD64SHRQ) 47247 v.AddArg(x) 47248 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 47249 v0.AddArg(y) 47250 v.AddArg(v0) 47251 return true 47252 } 47253 // match: (SHRQ x (ANDLconst [c] y)) 47254 // cond: c & 63 == 63 47255 // result: (SHRQ x y) 47256 for { 47257 _ = v.Args[1] 47258 x := v.Args[0] 47259 v_1 := v.Args[1] 47260 if v_1.Op != OpAMD64ANDLconst { 47261 break 47262 } 47263 c := v_1.AuxInt 47264 y := v_1.Args[0] 47265 if !(c&63 == 63) { 47266 break 47267 } 47268 v.reset(OpAMD64SHRQ) 47269 v.AddArg(x) 47270 v.AddArg(y) 47271 return true 47272 } 47273 // match: (SHRQ x (NEGL <t> (ANDLconst [c] y))) 47274 // cond: c & 63 == 63 47275 // result: (SHRQ x (NEGL <t> y)) 47276 for { 47277 _ = v.Args[1] 47278 x := v.Args[0] 47279 v_1 := v.Args[1] 47280 if v_1.Op != OpAMD64NEGL { 47281 break 47282 } 47283 t := v_1.Type 47284 v_1_0 := v_1.Args[0] 47285 if v_1_0.Op != OpAMD64ANDLconst { 47286 break 47287 } 47288 c := v_1_0.AuxInt 47289 y := v_1_0.Args[0] 47290 if !(c&63 == 63) { 47291 break 47292 } 47293 v.reset(OpAMD64SHRQ) 47294 v.AddArg(x) 47295 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 47296 v0.AddArg(y) 47297 v.AddArg(v0) 47298 return true 47299 } 47300 return false 47301 } 47302 func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { 47303 // match: (SHRQconst [1] (SHLQconst [1] x)) 47304 // result: (BTRQconst [63] x) 47305 for { 47306 if v.AuxInt != 1 { 47307 break 47308 } 47309 v_0 := v.Args[0] 47310 if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 { 47311 break 47312 } 47313 x := v_0.Args[0] 47314 v.reset(OpAMD64BTRQconst) 47315 v.AuxInt = 63 47316 v.AddArg(x) 47317 return true 47318 } 47319 // match: (SHRQconst x [0]) 47320 // result: x 47321 for { 47322 if v.AuxInt != 0 { 47323 break 47324 } 47325 x := v.Args[0] 47326 v.reset(OpCopy) 47327 v.Type = x.Type 47328 v.AddArg(x) 47329 return true 47330 } 47331 return false 47332 } 47333 func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { 47334 // match: (SHRW x (MOVQconst [c])) 47335 // cond: c&31 < 16 47336 // result: (SHRWconst [c&31] x) 47337 for { 47338 _ = v.Args[1] 47339 x := v.Args[0] 47340 v_1 := v.Args[1] 47341 if v_1.Op != OpAMD64MOVQconst { 47342 break 47343 } 47344 c := v_1.AuxInt 47345 if !(c&31 < 16) { 47346 break 47347 } 47348 v.reset(OpAMD64SHRWconst) 47349 v.AuxInt = c & 31 47350 v.AddArg(x) 47351 return true 47352 } 47353 // match: (SHRW x (MOVLconst [c])) 47354 // cond: c&31 < 16 47355 // result: (SHRWconst [c&31] x) 47356 for { 47357 _ = v.Args[1] 
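// Note: the (SHRQconst [1] (SHLQconst [1] x)) -> (BTRQconst [63] x) rule
// above, and its 32-bit sibling with [31], encode the identity that a logical
// shift-left-then-right by one clears exactly the top bit; the mirrored
// SHL-of-SHR rules earlier clear bit 0 instead. BTR's immediate form expresses
// either in a single bit-test-and-reset instruction.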
47358 x := v.Args[0] 47359 v_1 := v.Args[1] 47360 if v_1.Op != OpAMD64MOVLconst { 47361 break 47362 } 47363 c := v_1.AuxInt 47364 if !(c&31 < 16) { 47365 break 47366 } 47367 v.reset(OpAMD64SHRWconst) 47368 v.AuxInt = c & 31 47369 v.AddArg(x) 47370 return true 47371 } 47372 // match: (SHRW _ (MOVQconst [c])) 47373 // cond: c&31 >= 16 47374 // result: (MOVLconst [0]) 47375 for { 47376 _ = v.Args[1] 47377 v_1 := v.Args[1] 47378 if v_1.Op != OpAMD64MOVQconst { 47379 break 47380 } 47381 c := v_1.AuxInt 47382 if !(c&31 >= 16) { 47383 break 47384 } 47385 v.reset(OpAMD64MOVLconst) 47386 v.AuxInt = 0 47387 return true 47388 } 47389 // match: (SHRW _ (MOVLconst [c])) 47390 // cond: c&31 >= 16 47391 // result: (MOVLconst [0]) 47392 for { 47393 _ = v.Args[1] 47394 v_1 := v.Args[1] 47395 if v_1.Op != OpAMD64MOVLconst { 47396 break 47397 } 47398 c := v_1.AuxInt 47399 if !(c&31 >= 16) { 47400 break 47401 } 47402 v.reset(OpAMD64MOVLconst) 47403 v.AuxInt = 0 47404 return true 47405 } 47406 return false 47407 } 47408 func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { 47409 // match: (SHRWconst x [0]) 47410 // result: x 47411 for { 47412 if v.AuxInt != 0 { 47413 break 47414 } 47415 x := v.Args[0] 47416 v.reset(OpCopy) 47417 v.Type = x.Type 47418 v.AddArg(x) 47419 return true 47420 } 47421 return false 47422 } 47423 func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { 47424 b := v.Block 47425 // match: (SUBL x (MOVLconst [c])) 47426 // result: (SUBLconst x [c]) 47427 for { 47428 _ = v.Args[1] 47429 x := v.Args[0] 47430 v_1 := v.Args[1] 47431 if v_1.Op != OpAMD64MOVLconst { 47432 break 47433 } 47434 c := v_1.AuxInt 47435 v.reset(OpAMD64SUBLconst) 47436 v.AuxInt = c 47437 v.AddArg(x) 47438 return true 47439 } 47440 // match: (SUBL (MOVLconst [c]) x) 47441 // result: (NEGL (SUBLconst <v.Type> x [c])) 47442 for { 47443 x := v.Args[1] 47444 v_0 := v.Args[0] 47445 if v_0.Op != OpAMD64MOVLconst { 47446 break 47447 } 47448 c := v_0.AuxInt 47449 v.reset(OpAMD64NEGL) 47450 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 47451 v0.AuxInt = c 47452 v0.AddArg(x) 47453 v.AddArg(v0) 47454 return true 47455 } 47456 // match: (SUBL x x) 47457 // result: (MOVLconst [0]) 47458 for { 47459 x := v.Args[1] 47460 if x != v.Args[0] { 47461 break 47462 } 47463 v.reset(OpAMD64MOVLconst) 47464 v.AuxInt = 0 47465 return true 47466 } 47467 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) 47468 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 47469 // result: (SUBLload x [off] {sym} ptr mem) 47470 for { 47471 _ = v.Args[1] 47472 x := v.Args[0] 47473 l := v.Args[1] 47474 if l.Op != OpAMD64MOVLload { 47475 break 47476 } 47477 off := l.AuxInt 47478 sym := l.Aux 47479 mem := l.Args[1] 47480 ptr := l.Args[0] 47481 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 47482 break 47483 } 47484 v.reset(OpAMD64SUBLload) 47485 v.AuxInt = off 47486 v.Aux = sym 47487 v.AddArg(x) 47488 v.AddArg(ptr) 47489 v.AddArg(mem) 47490 return true 47491 } 47492 return false 47493 } 47494 func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { 47495 // match: (SUBLconst [c] x) 47496 // cond: int32(c) == 0 47497 // result: x 47498 for { 47499 c := v.AuxInt 47500 x := v.Args[0] 47501 if !(int32(c) == 0) { 47502 break 47503 } 47504 v.reset(OpCopy) 47505 v.Type = x.Type 47506 v.AddArg(x) 47507 return true 47508 } 47509 // match: (SUBLconst [c] x) 47510 // result: (ADDLconst [int64(int32(-c))] x) 47511 for { 47512 c := v.AuxInt 47513 x := v.Args[0] 47514 v.reset(OpAMD64ADDLconst) 47515 v.AuxInt = int64(int32(-c)) 47516 v.AddArg(x) 47517 return true 
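// Note: rewriting (SUBLconst [c] x) as (ADDLconst [int64(int32(-c))] x)
// canonicalizes immediate subtraction to addition, so later rules need only
// match one op; the int32 conversion keeps the negated constant in 32-bit
// range. Compare SUBQconst further down, which must guard with c != -(1<<31)
// because negating that value does not fit in a 32-bit immediate.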
47518 } 47519 } 47520 func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool { 47521 b := v.Block 47522 typ := &b.Func.Config.Types 47523 // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem) 47524 // cond: is32Bit(off1+off2) 47525 // result: (SUBLload [off1+off2] {sym} val base mem) 47526 for { 47527 off1 := v.AuxInt 47528 sym := v.Aux 47529 mem := v.Args[2] 47530 val := v.Args[0] 47531 v_1 := v.Args[1] 47532 if v_1.Op != OpAMD64ADDQconst { 47533 break 47534 } 47535 off2 := v_1.AuxInt 47536 base := v_1.Args[0] 47537 if !(is32Bit(off1 + off2)) { 47538 break 47539 } 47540 v.reset(OpAMD64SUBLload) 47541 v.AuxInt = off1 + off2 47542 v.Aux = sym 47543 v.AddArg(val) 47544 v.AddArg(base) 47545 v.AddArg(mem) 47546 return true 47547 } 47548 // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 47549 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 47550 // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 47551 for { 47552 off1 := v.AuxInt 47553 sym1 := v.Aux 47554 mem := v.Args[2] 47555 val := v.Args[0] 47556 v_1 := v.Args[1] 47557 if v_1.Op != OpAMD64LEAQ { 47558 break 47559 } 47560 off2 := v_1.AuxInt 47561 sym2 := v_1.Aux 47562 base := v_1.Args[0] 47563 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 47564 break 47565 } 47566 v.reset(OpAMD64SUBLload) 47567 v.AuxInt = off1 + off2 47568 v.Aux = mergeSym(sym1, sym2) 47569 v.AddArg(val) 47570 v.AddArg(base) 47571 v.AddArg(mem) 47572 return true 47573 } 47574 // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 47575 // result: (SUBL x (MOVLf2i y)) 47576 for { 47577 off := v.AuxInt 47578 sym := v.Aux 47579 _ = v.Args[2] 47580 x := v.Args[0] 47581 ptr := v.Args[1] 47582 v_2 := v.Args[2] 47583 if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { 47584 break 47585 } 47586 _ = v_2.Args[2] 47587 if ptr != v_2.Args[0] { 47588 break 47589 } 47590 y := v_2.Args[1] 47591 v.reset(OpAMD64SUBL) 47592 v.AddArg(x) 47593 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) 47594 v0.AddArg(y) 47595 v.AddArg(v0) 47596 return true 47597 } 47598 return false 47599 } 47600 func rewriteValueAMD64_OpAMD64SUBLmodify_0(v *Value) bool { 47601 // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem) 47602 // cond: is32Bit(off1+off2) 47603 // result: (SUBLmodify [off1+off2] {sym} base val mem) 47604 for { 47605 off1 := v.AuxInt 47606 sym := v.Aux 47607 mem := v.Args[2] 47608 v_0 := v.Args[0] 47609 if v_0.Op != OpAMD64ADDQconst { 47610 break 47611 } 47612 off2 := v_0.AuxInt 47613 base := v_0.Args[0] 47614 val := v.Args[1] 47615 if !(is32Bit(off1 + off2)) { 47616 break 47617 } 47618 v.reset(OpAMD64SUBLmodify) 47619 v.AuxInt = off1 + off2 47620 v.Aux = sym 47621 v.AddArg(base) 47622 v.AddArg(val) 47623 v.AddArg(mem) 47624 return true 47625 } 47626 // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 47627 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 47628 // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 47629 for { 47630 off1 := v.AuxInt 47631 sym1 := v.Aux 47632 mem := v.Args[2] 47633 v_0 := v.Args[0] 47634 if v_0.Op != OpAMD64LEAQ { 47635 break 47636 } 47637 off2 := v_0.AuxInt 47638 sym2 := v_0.Aux 47639 base := v_0.Args[0] 47640 val := v.Args[1] 47641 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 47642 break 47643 } 47644 v.reset(OpAMD64SUBLmodify) 47645 v.AuxInt = off1 + off2 47646 v.Aux = mergeSym(sym1, sym2) 47647 v.AddArg(base) 47648 v.AddArg(val) 47649 v.AddArg(mem) 47650 return true 47651 } 47652 return 
false 47653 } 47654 func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { 47655 b := v.Block 47656 // match: (SUBQ x (MOVQconst [c])) 47657 // cond: is32Bit(c) 47658 // result: (SUBQconst x [c]) 47659 for { 47660 _ = v.Args[1] 47661 x := v.Args[0] 47662 v_1 := v.Args[1] 47663 if v_1.Op != OpAMD64MOVQconst { 47664 break 47665 } 47666 c := v_1.AuxInt 47667 if !(is32Bit(c)) { 47668 break 47669 } 47670 v.reset(OpAMD64SUBQconst) 47671 v.AuxInt = c 47672 v.AddArg(x) 47673 return true 47674 } 47675 // match: (SUBQ (MOVQconst [c]) x) 47676 // cond: is32Bit(c) 47677 // result: (NEGQ (SUBQconst <v.Type> x [c])) 47678 for { 47679 x := v.Args[1] 47680 v_0 := v.Args[0] 47681 if v_0.Op != OpAMD64MOVQconst { 47682 break 47683 } 47684 c := v_0.AuxInt 47685 if !(is32Bit(c)) { 47686 break 47687 } 47688 v.reset(OpAMD64NEGQ) 47689 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 47690 v0.AuxInt = c 47691 v0.AddArg(x) 47692 v.AddArg(v0) 47693 return true 47694 } 47695 // match: (SUBQ x x) 47696 // result: (MOVQconst [0]) 47697 for { 47698 x := v.Args[1] 47699 if x != v.Args[0] { 47700 break 47701 } 47702 v.reset(OpAMD64MOVQconst) 47703 v.AuxInt = 0 47704 return true 47705 } 47706 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) 47707 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 47708 // result: (SUBQload x [off] {sym} ptr mem) 47709 for { 47710 _ = v.Args[1] 47711 x := v.Args[0] 47712 l := v.Args[1] 47713 if l.Op != OpAMD64MOVQload { 47714 break 47715 } 47716 off := l.AuxInt 47717 sym := l.Aux 47718 mem := l.Args[1] 47719 ptr := l.Args[0] 47720 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 47721 break 47722 } 47723 v.reset(OpAMD64SUBQload) 47724 v.AuxInt = off 47725 v.Aux = sym 47726 v.AddArg(x) 47727 v.AddArg(ptr) 47728 v.AddArg(mem) 47729 return true 47730 } 47731 return false 47732 } 47733 func rewriteValueAMD64_OpAMD64SUBQborrow_0(v *Value) bool { 47734 // match: (SUBQborrow x (MOVQconst [c])) 47735 // cond: is32Bit(c) 47736 // result: (SUBQconstborrow x [c]) 47737 for { 47738 _ = v.Args[1] 47739 x := v.Args[0] 47740 v_1 := v.Args[1] 47741 if v_1.Op != OpAMD64MOVQconst { 47742 break 47743 } 47744 c := v_1.AuxInt 47745 if !(is32Bit(c)) { 47746 break 47747 } 47748 v.reset(OpAMD64SUBQconstborrow) 47749 v.AuxInt = c 47750 v.AddArg(x) 47751 return true 47752 } 47753 return false 47754 } 47755 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { 47756 // match: (SUBQconst [0] x) 47757 // result: x 47758 for { 47759 if v.AuxInt != 0 { 47760 break 47761 } 47762 x := v.Args[0] 47763 v.reset(OpCopy) 47764 v.Type = x.Type 47765 v.AddArg(x) 47766 return true 47767 } 47768 // match: (SUBQconst [c] x) 47769 // cond: c != -(1<<31) 47770 // result: (ADDQconst [-c] x) 47771 for { 47772 c := v.AuxInt 47773 x := v.Args[0] 47774 if !(c != -(1 << 31)) { 47775 break 47776 } 47777 v.reset(OpAMD64ADDQconst) 47778 v.AuxInt = -c 47779 v.AddArg(x) 47780 return true 47781 } 47782 // match: (SUBQconst (MOVQconst [d]) [c]) 47783 // result: (MOVQconst [d-c]) 47784 for { 47785 c := v.AuxInt 47786 v_0 := v.Args[0] 47787 if v_0.Op != OpAMD64MOVQconst { 47788 break 47789 } 47790 d := v_0.AuxInt 47791 v.reset(OpAMD64MOVQconst) 47792 v.AuxInt = d - c 47793 return true 47794 } 47795 // match: (SUBQconst (SUBQconst x [d]) [c]) 47796 // cond: is32Bit(-c-d) 47797 // result: (ADDQconst [-c-d] x) 47798 for { 47799 c := v.AuxInt 47800 v_0 := v.Args[0] 47801 if v_0.Op != OpAMD64SUBQconst { 47802 break 47803 } 47804 d := v_0.AuxInt 47805 x := v_0.Args[0] 47806 if !(is32Bit(-c - d)) { 47807 break 47808 } 47809 
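// Note: stacked constant subtractions fold to one addition, since
// x - d - c == x + (-c - d); the is32Bit(-c-d) guard keeps the combined
// immediate encodable before the rule commits to the ADDQconst below.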
v.reset(OpAMD64ADDQconst) 47810 v.AuxInt = -c - d 47811 v.AddArg(x) 47812 return true 47813 } 47814 return false 47815 } 47816 func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool { 47817 b := v.Block 47818 typ := &b.Func.Config.Types 47819 // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem) 47820 // cond: is32Bit(off1+off2) 47821 // result: (SUBQload [off1+off2] {sym} val base mem) 47822 for { 47823 off1 := v.AuxInt 47824 sym := v.Aux 47825 mem := v.Args[2] 47826 val := v.Args[0] 47827 v_1 := v.Args[1] 47828 if v_1.Op != OpAMD64ADDQconst { 47829 break 47830 } 47831 off2 := v_1.AuxInt 47832 base := v_1.Args[0] 47833 if !(is32Bit(off1 + off2)) { 47834 break 47835 } 47836 v.reset(OpAMD64SUBQload) 47837 v.AuxInt = off1 + off2 47838 v.Aux = sym 47839 v.AddArg(val) 47840 v.AddArg(base) 47841 v.AddArg(mem) 47842 return true 47843 } 47844 // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 47845 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 47846 // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 47847 for { 47848 off1 := v.AuxInt 47849 sym1 := v.Aux 47850 mem := v.Args[2] 47851 val := v.Args[0] 47852 v_1 := v.Args[1] 47853 if v_1.Op != OpAMD64LEAQ { 47854 break 47855 } 47856 off2 := v_1.AuxInt 47857 sym2 := v_1.Aux 47858 base := v_1.Args[0] 47859 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 47860 break 47861 } 47862 v.reset(OpAMD64SUBQload) 47863 v.AuxInt = off1 + off2 47864 v.Aux = mergeSym(sym1, sym2) 47865 v.AddArg(val) 47866 v.AddArg(base) 47867 v.AddArg(mem) 47868 return true 47869 } 47870 // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 47871 // result: (SUBQ x (MOVQf2i y)) 47872 for { 47873 off := v.AuxInt 47874 sym := v.Aux 47875 _ = v.Args[2] 47876 x := v.Args[0] 47877 ptr := v.Args[1] 47878 v_2 := v.Args[2] 47879 if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { 47880 break 47881 } 47882 _ = v_2.Args[2] 47883 if ptr != v_2.Args[0] { 47884 break 47885 } 47886 y := v_2.Args[1] 47887 v.reset(OpAMD64SUBQ) 47888 v.AddArg(x) 47889 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) 47890 v0.AddArg(y) 47891 v.AddArg(v0) 47892 return true 47893 } 47894 return false 47895 } 47896 func rewriteValueAMD64_OpAMD64SUBQmodify_0(v *Value) bool { 47897 // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem) 47898 // cond: is32Bit(off1+off2) 47899 // result: (SUBQmodify [off1+off2] {sym} base val mem) 47900 for { 47901 off1 := v.AuxInt 47902 sym := v.Aux 47903 mem := v.Args[2] 47904 v_0 := v.Args[0] 47905 if v_0.Op != OpAMD64ADDQconst { 47906 break 47907 } 47908 off2 := v_0.AuxInt 47909 base := v_0.Args[0] 47910 val := v.Args[1] 47911 if !(is32Bit(off1 + off2)) { 47912 break 47913 } 47914 v.reset(OpAMD64SUBQmodify) 47915 v.AuxInt = off1 + off2 47916 v.Aux = sym 47917 v.AddArg(base) 47918 v.AddArg(val) 47919 v.AddArg(mem) 47920 return true 47921 } 47922 // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 47923 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 47924 // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) 47925 for { 47926 off1 := v.AuxInt 47927 sym1 := v.Aux 47928 mem := v.Args[2] 47929 v_0 := v.Args[0] 47930 if v_0.Op != OpAMD64LEAQ { 47931 break 47932 } 47933 off2 := v_0.AuxInt 47934 sym2 := v_0.Aux 47935 base := v_0.Args[0] 47936 val := v.Args[1] 47937 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 47938 break 47939 } 47940 v.reset(OpAMD64SUBQmodify) 47941 v.AuxInt = off1 + off2 47942 v.Aux = mergeSym(sym1, sym2) 
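// Note: mergeSym combines the symbolic halves of the two addresses; roughly,
// canMergeSym only accepts the case where at most one of sym1 and sym2 is
// non-nil, so the merged Aux is simply whichever symbol is present (or nil),
// while two distinct real symbols leave the pattern untouched.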
47943 v.AddArg(base) 47944 v.AddArg(val) 47945 v.AddArg(mem) 47946 return true 47947 } 47948 return false 47949 } 47950 func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { 47951 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) 47952 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 47953 // result: (SUBSDload x [off] {sym} ptr mem) 47954 for { 47955 _ = v.Args[1] 47956 x := v.Args[0] 47957 l := v.Args[1] 47958 if l.Op != OpAMD64MOVSDload { 47959 break 47960 } 47961 off := l.AuxInt 47962 sym := l.Aux 47963 mem := l.Args[1] 47964 ptr := l.Args[0] 47965 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 47966 break 47967 } 47968 v.reset(OpAMD64SUBSDload) 47969 v.AuxInt = off 47970 v.Aux = sym 47971 v.AddArg(x) 47972 v.AddArg(ptr) 47973 v.AddArg(mem) 47974 return true 47975 } 47976 return false 47977 } 47978 func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool { 47979 b := v.Block 47980 typ := &b.Func.Config.Types 47981 // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem) 47982 // cond: is32Bit(off1+off2) 47983 // result: (SUBSDload [off1+off2] {sym} val base mem) 47984 for { 47985 off1 := v.AuxInt 47986 sym := v.Aux 47987 mem := v.Args[2] 47988 val := v.Args[0] 47989 v_1 := v.Args[1] 47990 if v_1.Op != OpAMD64ADDQconst { 47991 break 47992 } 47993 off2 := v_1.AuxInt 47994 base := v_1.Args[0] 47995 if !(is32Bit(off1 + off2)) { 47996 break 47997 } 47998 v.reset(OpAMD64SUBSDload) 47999 v.AuxInt = off1 + off2 48000 v.Aux = sym 48001 v.AddArg(val) 48002 v.AddArg(base) 48003 v.AddArg(mem) 48004 return true 48005 } 48006 // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 48007 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 48008 // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 48009 for { 48010 off1 := v.AuxInt 48011 sym1 := v.Aux 48012 mem := v.Args[2] 48013 val := v.Args[0] 48014 v_1 := v.Args[1] 48015 if v_1.Op != OpAMD64LEAQ { 48016 break 48017 } 48018 off2 := v_1.AuxInt 48019 sym2 := v_1.Aux 48020 base := v_1.Args[0] 48021 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 48022 break 48023 } 48024 v.reset(OpAMD64SUBSDload) 48025 v.AuxInt = off1 + off2 48026 v.Aux = mergeSym(sym1, sym2) 48027 v.AddArg(val) 48028 v.AddArg(base) 48029 v.AddArg(mem) 48030 return true 48031 } 48032 // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 48033 // result: (SUBSD x (MOVQi2f y)) 48034 for { 48035 off := v.AuxInt 48036 sym := v.Aux 48037 _ = v.Args[2] 48038 x := v.Args[0] 48039 ptr := v.Args[1] 48040 v_2 := v.Args[2] 48041 if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { 48042 break 48043 } 48044 _ = v_2.Args[2] 48045 if ptr != v_2.Args[0] { 48046 break 48047 } 48048 y := v_2.Args[1] 48049 v.reset(OpAMD64SUBSD) 48050 v.AddArg(x) 48051 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) 48052 v0.AddArg(y) 48053 v.AddArg(v0) 48054 return true 48055 } 48056 return false 48057 } 48058 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { 48059 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) 48060 // cond: canMergeLoadClobber(v, l, x) && clobber(l) 48061 // result: (SUBSSload x [off] {sym} ptr mem) 48062 for { 48063 _ = v.Args[1] 48064 x := v.Args[0] 48065 l := v.Args[1] 48066 if l.Op != OpAMD64MOVSSload { 48067 break 48068 } 48069 off := l.AuxInt 48070 sym := l.Aux 48071 mem := l.Args[1] 48072 ptr := l.Args[0] 48073 if !(canMergeLoadClobber(v, l, x) && clobber(l)) { 48074 break 48075 } 48076 v.reset(OpAMD64SUBSSload) 48077 v.AuxInt = off 48078 v.Aux = sym 48079 v.AddArg(x) 48080 
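// Note: this is the load-folding pattern: roughly, when canMergeLoadClobber
// reports that the MOVSSload can be absorbed (no other surviving uses) and
// clobber marks it dead, the subtract and the load fuse into one
// memory-operand instruction, SUBSSload.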
v.AddArg(ptr) 48081 v.AddArg(mem) 48082 return true 48083 } 48084 return false 48085 } 48086 func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool { 48087 b := v.Block 48088 typ := &b.Func.Config.Types 48089 // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem) 48090 // cond: is32Bit(off1+off2) 48091 // result: (SUBSSload [off1+off2] {sym} val base mem) 48092 for { 48093 off1 := v.AuxInt 48094 sym := v.Aux 48095 mem := v.Args[2] 48096 val := v.Args[0] 48097 v_1 := v.Args[1] 48098 if v_1.Op != OpAMD64ADDQconst { 48099 break 48100 } 48101 off2 := v_1.AuxInt 48102 base := v_1.Args[0] 48103 if !(is32Bit(off1 + off2)) { 48104 break 48105 } 48106 v.reset(OpAMD64SUBSSload) 48107 v.AuxInt = off1 + off2 48108 v.Aux = sym 48109 v.AddArg(val) 48110 v.AddArg(base) 48111 v.AddArg(mem) 48112 return true 48113 } 48114 // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) 48115 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 48116 // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 48117 for { 48118 off1 := v.AuxInt 48119 sym1 := v.Aux 48120 mem := v.Args[2] 48121 val := v.Args[0] 48122 v_1 := v.Args[1] 48123 if v_1.Op != OpAMD64LEAQ { 48124 break 48125 } 48126 off2 := v_1.AuxInt 48127 sym2 := v_1.Aux 48128 base := v_1.Args[0] 48129 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 48130 break 48131 } 48132 v.reset(OpAMD64SUBSSload) 48133 v.AuxInt = off1 + off2 48134 v.Aux = mergeSym(sym1, sym2) 48135 v.AddArg(val) 48136 v.AddArg(base) 48137 v.AddArg(mem) 48138 return true 48139 } 48140 // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 48141 // result: (SUBSS x (MOVLi2f y)) 48142 for { 48143 off := v.AuxInt 48144 sym := v.Aux 48145 _ = v.Args[2] 48146 x := v.Args[0] 48147 ptr := v.Args[1] 48148 v_2 := v.Args[2] 48149 if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { 48150 break 48151 } 48152 _ = v_2.Args[2] 48153 if ptr != v_2.Args[0] { 48154 break 48155 } 48156 y := v_2.Args[1] 48157 v.reset(OpAMD64SUBSS) 48158 v.AddArg(x) 48159 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) 48160 v0.AddArg(y) 48161 v.AddArg(v0) 48162 return true 48163 } 48164 return false 48165 } 48166 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { 48167 b := v.Block 48168 // match: (TESTB (MOVLconst [c]) x) 48169 // result: (TESTBconst [c] x) 48170 for { 48171 x := v.Args[1] 48172 v_0 := v.Args[0] 48173 if v_0.Op != OpAMD64MOVLconst { 48174 break 48175 } 48176 c := v_0.AuxInt 48177 v.reset(OpAMD64TESTBconst) 48178 v.AuxInt = c 48179 v.AddArg(x) 48180 return true 48181 } 48182 // match: (TESTB x (MOVLconst [c])) 48183 // result: (TESTBconst [c] x) 48184 for { 48185 _ = v.Args[1] 48186 x := v.Args[0] 48187 v_1 := v.Args[1] 48188 if v_1.Op != OpAMD64MOVLconst { 48189 break 48190 } 48191 c := v_1.AuxInt 48192 v.reset(OpAMD64TESTBconst) 48193 v.AuxInt = c 48194 v.AddArg(x) 48195 return true 48196 } 48197 // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2) 48198 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 48199 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem) 48200 for { 48201 l2 := v.Args[1] 48202 l := v.Args[0] 48203 if l.Op != OpAMD64MOVBload { 48204 break 48205 } 48206 off := l.AuxInt 48207 sym := l.Aux 48208 mem := l.Args[1] 48209 ptr := l.Args[0] 48210 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 48211 break 48212 } 48213 b = l.Block 48214 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) 48215 v.reset(OpCopy) 48216 
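// Note: the "@l.Block (...)" result form used by these TEST rules builds the
// replacement in the load's block rather than v's: v0 is created with l.Pos in
// l.Block, and v itself becomes an OpCopy of v0, so existing users of v are
// preserved while the flag computation moves next to the memory state it
// reads.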
v.AddArg(v0) 48217 v0.AuxInt = makeValAndOff(0, off) 48218 v0.Aux = sym 48219 v0.AddArg(ptr) 48220 v0.AddArg(mem) 48221 return true 48222 } 48223 // match: (TESTB l2 l:(MOVBload {sym} [off] ptr mem)) 48224 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 48225 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem) 48226 for { 48227 _ = v.Args[1] 48228 l2 := v.Args[0] 48229 l := v.Args[1] 48230 if l.Op != OpAMD64MOVBload { 48231 break 48232 } 48233 off := l.AuxInt 48234 sym := l.Aux 48235 mem := l.Args[1] 48236 ptr := l.Args[0] 48237 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 48238 break 48239 } 48240 b = l.Block 48241 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) 48242 v.reset(OpCopy) 48243 v.AddArg(v0) 48244 v0.AuxInt = makeValAndOff(0, off) 48245 v0.Aux = sym 48246 v0.AddArg(ptr) 48247 v0.AddArg(mem) 48248 return true 48249 } 48250 return false 48251 } 48252 func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool { 48253 // match: (TESTBconst [-1] x) 48254 // cond: x.Op != OpAMD64MOVLconst 48255 // result: (TESTB x x) 48256 for { 48257 if v.AuxInt != -1 { 48258 break 48259 } 48260 x := v.Args[0] 48261 if !(x.Op != OpAMD64MOVLconst) { 48262 break 48263 } 48264 v.reset(OpAMD64TESTB) 48265 v.AddArg(x) 48266 v.AddArg(x) 48267 return true 48268 } 48269 return false 48270 } 48271 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { 48272 b := v.Block 48273 // match: (TESTL (MOVLconst [c]) x) 48274 // result: (TESTLconst [c] x) 48275 for { 48276 x := v.Args[1] 48277 v_0 := v.Args[0] 48278 if v_0.Op != OpAMD64MOVLconst { 48279 break 48280 } 48281 c := v_0.AuxInt 48282 v.reset(OpAMD64TESTLconst) 48283 v.AuxInt = c 48284 v.AddArg(x) 48285 return true 48286 } 48287 // match: (TESTL x (MOVLconst [c])) 48288 // result: (TESTLconst [c] x) 48289 for { 48290 _ = v.Args[1] 48291 x := v.Args[0] 48292 v_1 := v.Args[1] 48293 if v_1.Op != OpAMD64MOVLconst { 48294 break 48295 } 48296 c := v_1.AuxInt 48297 v.reset(OpAMD64TESTLconst) 48298 v.AuxInt = c 48299 v.AddArg(x) 48300 return true 48301 } 48302 // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2) 48303 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 48304 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem) 48305 for { 48306 l2 := v.Args[1] 48307 l := v.Args[0] 48308 if l.Op != OpAMD64MOVLload { 48309 break 48310 } 48311 off := l.AuxInt 48312 sym := l.Aux 48313 mem := l.Args[1] 48314 ptr := l.Args[0] 48315 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 48316 break 48317 } 48318 b = l.Block 48319 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) 48320 v.reset(OpCopy) 48321 v.AddArg(v0) 48322 v0.AuxInt = makeValAndOff(0, off) 48323 v0.Aux = sym 48324 v0.AddArg(ptr) 48325 v0.AddArg(mem) 48326 return true 48327 } 48328 // match: (TESTL l2 l:(MOVLload {sym} [off] ptr mem)) 48329 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 48330 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem) 48331 for { 48332 _ = v.Args[1] 48333 l2 := v.Args[0] 48334 l := v.Args[1] 48335 if l.Op != OpAMD64MOVLload { 48336 break 48337 } 48338 off := l.AuxInt 48339 sym := l.Aux 48340 mem := l.Args[1] 48341 ptr := l.Args[0] 48342 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 48343 break 48344 } 48345 b = l.Block 48346 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) 48347 v.reset(OpCopy) 48348 v.AddArg(v0) 48349 v0.AuxInt = 
makeValAndOff(0, off) 48350 v0.Aux = sym 48351 v0.AddArg(ptr) 48352 v0.AddArg(mem) 48353 return true 48354 } 48355 return false 48356 } 48357 func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool { 48358 // match: (TESTLconst [-1] x) 48359 // cond: x.Op != OpAMD64MOVLconst 48360 // result: (TESTL x x) 48361 for { 48362 if v.AuxInt != -1 { 48363 break 48364 } 48365 x := v.Args[0] 48366 if !(x.Op != OpAMD64MOVLconst) { 48367 break 48368 } 48369 v.reset(OpAMD64TESTL) 48370 v.AddArg(x) 48371 v.AddArg(x) 48372 return true 48373 } 48374 return false 48375 } 48376 func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { 48377 b := v.Block 48378 // match: (TESTQ (MOVQconst [c]) x) 48379 // cond: is32Bit(c) 48380 // result: (TESTQconst [c] x) 48381 for { 48382 x := v.Args[1] 48383 v_0 := v.Args[0] 48384 if v_0.Op != OpAMD64MOVQconst { 48385 break 48386 } 48387 c := v_0.AuxInt 48388 if !(is32Bit(c)) { 48389 break 48390 } 48391 v.reset(OpAMD64TESTQconst) 48392 v.AuxInt = c 48393 v.AddArg(x) 48394 return true 48395 } 48396 // match: (TESTQ x (MOVQconst [c])) 48397 // cond: is32Bit(c) 48398 // result: (TESTQconst [c] x) 48399 for { 48400 _ = v.Args[1] 48401 x := v.Args[0] 48402 v_1 := v.Args[1] 48403 if v_1.Op != OpAMD64MOVQconst { 48404 break 48405 } 48406 c := v_1.AuxInt 48407 if !(is32Bit(c)) { 48408 break 48409 } 48410 v.reset(OpAMD64TESTQconst) 48411 v.AuxInt = c 48412 v.AddArg(x) 48413 return true 48414 } 48415 // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2) 48416 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 48417 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem) 48418 for { 48419 l2 := v.Args[1] 48420 l := v.Args[0] 48421 if l.Op != OpAMD64MOVQload { 48422 break 48423 } 48424 off := l.AuxInt 48425 sym := l.Aux 48426 mem := l.Args[1] 48427 ptr := l.Args[0] 48428 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 48429 break 48430 } 48431 b = l.Block 48432 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) 48433 v.reset(OpCopy) 48434 v.AddArg(v0) 48435 v0.AuxInt = makeValAndOff(0, off) 48436 v0.Aux = sym 48437 v0.AddArg(ptr) 48438 v0.AddArg(mem) 48439 return true 48440 } 48441 // match: (TESTQ l2 l:(MOVQload {sym} [off] ptr mem)) 48442 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 48443 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem) 48444 for { 48445 _ = v.Args[1] 48446 l2 := v.Args[0] 48447 l := v.Args[1] 48448 if l.Op != OpAMD64MOVQload { 48449 break 48450 } 48451 off := l.AuxInt 48452 sym := l.Aux 48453 mem := l.Args[1] 48454 ptr := l.Args[0] 48455 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 48456 break 48457 } 48458 b = l.Block 48459 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) 48460 v.reset(OpCopy) 48461 v.AddArg(v0) 48462 v0.AuxInt = makeValAndOff(0, off) 48463 v0.Aux = sym 48464 v0.AddArg(ptr) 48465 v0.AddArg(mem) 48466 return true 48467 } 48468 return false 48469 } 48470 func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool { 48471 // match: (TESTQconst [-1] x) 48472 // cond: x.Op != OpAMD64MOVQconst 48473 // result: (TESTQ x x) 48474 for { 48475 if v.AuxInt != -1 { 48476 break 48477 } 48478 x := v.Args[0] 48479 if !(x.Op != OpAMD64MOVQconst) { 48480 break 48481 } 48482 v.reset(OpAMD64TESTQ) 48483 v.AddArg(x) 48484 v.AddArg(x) 48485 return true 48486 } 48487 return false 48488 } 48489 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { 48490 b := v.Block 48491 // match: (TESTW (MOVLconst [c]) x) 
48492 // result: (TESTWconst [c] x) 48493 for { 48494 x := v.Args[1] 48495 v_0 := v.Args[0] 48496 if v_0.Op != OpAMD64MOVLconst { 48497 break 48498 } 48499 c := v_0.AuxInt 48500 v.reset(OpAMD64TESTWconst) 48501 v.AuxInt = c 48502 v.AddArg(x) 48503 return true 48504 } 48505 // match: (TESTW x (MOVLconst [c])) 48506 // result: (TESTWconst [c] x) 48507 for { 48508 _ = v.Args[1] 48509 x := v.Args[0] 48510 v_1 := v.Args[1] 48511 if v_1.Op != OpAMD64MOVLconst { 48512 break 48513 } 48514 c := v_1.AuxInt 48515 v.reset(OpAMD64TESTWconst) 48516 v.AuxInt = c 48517 v.AddArg(x) 48518 return true 48519 } 48520 // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2) 48521 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 48522 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem) 48523 for { 48524 l2 := v.Args[1] 48525 l := v.Args[0] 48526 if l.Op != OpAMD64MOVWload { 48527 break 48528 } 48529 off := l.AuxInt 48530 sym := l.Aux 48531 mem := l.Args[1] 48532 ptr := l.Args[0] 48533 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 48534 break 48535 } 48536 b = l.Block 48537 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) 48538 v.reset(OpCopy) 48539 v.AddArg(v0) 48540 v0.AuxInt = makeValAndOff(0, off) 48541 v0.Aux = sym 48542 v0.AddArg(ptr) 48543 v0.AddArg(mem) 48544 return true 48545 } 48546 // match: (TESTW l2 l:(MOVWload {sym} [off] ptr mem)) 48547 // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) 48548 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem) 48549 for { 48550 _ = v.Args[1] 48551 l2 := v.Args[0] 48552 l := v.Args[1] 48553 if l.Op != OpAMD64MOVWload { 48554 break 48555 } 48556 off := l.AuxInt 48557 sym := l.Aux 48558 mem := l.Args[1] 48559 ptr := l.Args[0] 48560 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { 48561 break 48562 } 48563 b = l.Block 48564 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) 48565 v.reset(OpCopy) 48566 v.AddArg(v0) 48567 v0.AuxInt = makeValAndOff(0, off) 48568 v0.Aux = sym 48569 v0.AddArg(ptr) 48570 v0.AddArg(mem) 48571 return true 48572 } 48573 return false 48574 } 48575 func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool { 48576 // match: (TESTWconst [-1] x) 48577 // cond: x.Op != OpAMD64MOVLconst 48578 // result: (TESTW x x) 48579 for { 48580 if v.AuxInt != -1 { 48581 break 48582 } 48583 x := v.Args[0] 48584 if !(x.Op != OpAMD64MOVLconst) { 48585 break 48586 } 48587 v.reset(OpAMD64TESTW) 48588 v.AddArg(x) 48589 v.AddArg(x) 48590 return true 48591 } 48592 return false 48593 } 48594 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { 48595 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 48596 // cond: is32Bit(off1+off2) 48597 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 48598 for { 48599 off1 := v.AuxInt 48600 sym := v.Aux 48601 mem := v.Args[2] 48602 val := v.Args[0] 48603 v_1 := v.Args[1] 48604 if v_1.Op != OpAMD64ADDQconst { 48605 break 48606 } 48607 off2 := v_1.AuxInt 48608 ptr := v_1.Args[0] 48609 if !(is32Bit(off1 + off2)) { 48610 break 48611 } 48612 v.reset(OpAMD64XADDLlock) 48613 v.AuxInt = off1 + off2 48614 v.Aux = sym 48615 v.AddArg(val) 48616 v.AddArg(ptr) 48617 v.AddArg(mem) 48618 return true 48619 } 48620 return false 48621 } 48622 func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { 48623 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 48624 // cond: is32Bit(off1+off2) 48625 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 48626 for { 
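// Note: in the TESTB/TESTW/TESTL/TESTQ load rules above, the load l is both
// operands of the TEST (l == l2), so it already accounts for two uses;
// l.Uses == 2 therefore means the TEST is the load's only consumer, and it is
// safe to clobber it and compare against zero straight from memory
// (CMPWconstload and friends). The XADD/XCHG atomic rules that follow fold
// constant address offsets in the same way as the plain loads and stores
// above.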
func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
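// The LEAQ variants of the XCHG rules also merge the LEAQ's symbol via
// mergeSym; the ptr.Op != OpSB guard skips the fold when the base is the
// static-base pseudo-register.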
func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
	// match: (XORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTCL x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		y := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVLconst || v_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTCL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (XORL x (SHLL (MOVLconst [1]) y))
	// result: (BTCL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst || v_1_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTCL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCLconst [log2uint32(c)] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (XORL x (MOVLconst [c]))
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCLconst [log2uint32(c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (XORL x (MOVLconst [c]))
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// result: (XORLconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
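// For intuition: x ^ (1 << y) toggles bit y, which is exactly what BTCL does,
// and (x << c) ^ (x >> (32-c)) assembles the two disjoint halves of a
// rotation, hence ROLLconst. The 16-bit variants additionally check
// t.Size() == 2 and c < 16 so the rotation width matches the value's type.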
func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
	// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL x x)
	// result: (MOVLconst [0])
	for {
		x := v.Args[1]
		if x != v.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
	// match: (XORLconst [c] x)
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCLconst [log2uint32(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETNE x))
	// result: (SETEQ x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// result: (SETNE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// result: (SETGE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// result: (SETL x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// result: (SETG x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// result: (SETLE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// result: (SETAE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// result: (SETB x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// result: (SETA x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	return false
}
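// For intuition: the SETcc rules above rely on b ^ 1 == !b for the 0/1 byte a
// SETcc produces, so XORing such a boolean with 1 is folded into the inverse
// condition instead (SETNE<->SETEQ, SETL<->SETGE, SETB<->SETAE, and so on),
// covering signed and unsigned comparisons alike.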
func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
	// match: (XORLconst [1] (SETA x))
	// result: (SETBE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (XORLconst [d] x))
	// result: (XORLconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (BTCLconst [d] x))
	// result: (XORLconst [c ^ 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconstmodify_0(v *Value) bool {
	// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
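// ValAndOff packs a (value, offset) pair into one AuxInt; canAdd checks that
// the offset enlarged by off2 still fits its field, and add produces the
// updated pair, mirroring the plain is32Bit checks used by the load and
// modify rules elsewhere in this file.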
func rewriteValueAMD64_OpAMD64XORLload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (XORLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (XORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLmodify_0(v *Value) bool {
	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (XORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
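// For intuition: the MOVSSstore rule above catches an integer load that reads
// back a float32 just stored to the same [off] {sym} ptr address; instead of
// a memory round trip, it XORs with the bits reinterpreted in-register via
// MOVLf2i.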
func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
	// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTCQ x y)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		y := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTCQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (XORQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTCQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || v_1_0.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64BTCQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCQconst [log2(c)] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCQconst [log2(c)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] || !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ x x)
	// result: (MOVQconst [0])
	for {
		x := v.Args[1]
		if x != v.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool {
	// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
	// match: (XORQconst [c] x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (XORQconst [d] x))
	// result: (XORQconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (BTCQconst [d] x))
	// result: (XORQconst [c ^ 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
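// A hedged note on the BTCQconst bound: constants below 128 fit a
// sign-extended 8-bit immediate, so a plain XOR is already compact, and for
// powers of two at bit 32 and above the is32Bit guard rules XORQconst out
// entirely, leaving BTCQconst with a bit-index immediate as the only
// single-instruction form.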
func rewriteValueAMD64_OpAMD64XORQconstmodify_0(v *Value) bool {
	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (XORQload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (XORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
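// The MOVSDstore rule above mirrors the 32-bit MOVSSstore case: a 64-bit
// integer load of a just-stored float64 at the same address becomes a
// register-only MOVQf2i bit reinterpretation feeding XORQ.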
func rewriteValueAMD64_OpAMD64XORQmodify_0(v *Value) bool {
	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (XORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
	// match: (Add16 x y)
	// result: (ADDL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
	// match: (Add32 x y)
	// result: (ADDL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
	// match: (Add32F x y)
	// result: (ADDSS x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
	// match: (Add64 x y)
	// result: (ADDQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
	// match: (Add64F x y)
	// result: (ADDSD x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
	// match: (Add8 x y)
	// result: (ADDL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
	// match: (AddPtr x y)
	// result: (ADDQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddr_0(v *Value) bool {
	// match: (Addr {sym} base)
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
}
func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
	// match: (And16 x y)
	// result: (ANDL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
	// match: (And32 x y)
	// result: (ANDL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
	// match: (And64 x y)
	// result: (ANDQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
	// match: (And8 x y)
	// result: (ANDL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB_0(v *Value) bool {
	// match: (AndB x y)
	// result: (ANDL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
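// From here on, generic SSA ops are lowered to machine ops. For intuition:
// the 8-, 16- and 32-bit adds and ands all map to the 32-bit ALU forms (ADDL,
// ANDL), since the high bits of sub-word values are not observable here; only
// 64-bit and pointer-width ops need the Q forms, and Addr becomes a LEAQ of
// its symbol.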
func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicAdd32 ptr val mem)
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpAMD64AddTupleFirst32)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicAdd64 ptr val mem)
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpAMD64AddTupleFirst64)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
	// match: (AtomicAnd8 ptr val mem)
	// result: (ANDBlock ptr val mem)
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpAMD64ANDBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		mem := v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		mem := v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
	// match: (AtomicExchange32 ptr val mem)
	// result: (XCHGL val ptr mem)
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpAMD64XCHGL)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
	// match: (AtomicExchange64 ptr val mem)
	// result: (XCHGQ val ptr mem)
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpAMD64XCHGQ)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
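// For intuition: lock XADD leaves the *old* memory value in the tuple's first
// slot, so AddTupleFirst32/64 re-add val to yield the new value that
// AtomicAdd is defined to return; the memory effect stays in the tuple's
// second slot for Select1-style consumers.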
func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
	// match: (AtomicLoad32 ptr mem)
	// result: (MOVLatomicload ptr mem)
	for {
		mem := v.Args[1]
		ptr := v.Args[0]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
	// match: (AtomicLoad64 ptr mem)
	// result: (MOVQatomicload ptr mem)
	for {
		mem := v.Args[1]
		ptr := v.Args[0]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad8_0(v *Value) bool {
	// match: (AtomicLoad8 ptr mem)
	// result: (MOVBatomicload ptr mem)
	for {
		mem := v.Args[1]
		ptr := v.Args[0]
		v.reset(OpAMD64MOVBatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
	// match: (AtomicLoadPtr ptr mem)
	// result: (MOVQatomicload ptr mem)
	for {
		mem := v.Args[1]
		ptr := v.Args[0]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
	// match: (AtomicOr8 ptr val mem)
	// result: (ORBlock ptr val mem)
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpAMD64ORBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore32 ptr val mem)
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore64 ptr val mem)
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore8_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore8 ptr val mem)
	// result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
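// For intuition: atomic stores lower to XCHG rather than a plain MOV because
// a memory-operand XCHG is implicitly locked, giving the sequentially
// consistent ordering atomic stores require; Select1 keeps only the memory
// result and the exchanged-out old value is discarded.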
func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStorePtrNoWB ptr val mem)
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
	// match: (Avg64u x y)
	// result: (AVGQU x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpBitLen16_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen16 x)
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = 1
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v2.AddArg(x)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen32 x)
	// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
		v1.AuxInt = 1
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v2.AddArg(x)
		v1.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v3.AddArg(x)
		v1.AddArg(v3)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen64 <t> x)
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
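// For intuition: BSR yields the index of the highest set bit but is undefined
// for input 0. The 8/16/32-bit forms dodge that by computing BSR(2*x+1) --
// the LEAL1/LEAQ1 [1] (x)(x) above -- which is never zero and equals
// bitlen(x) directly (e.g. x=5: BSR(11) = 3). The 64-bit form instead checks
// BSRQ's flags and substitutes -1 via CMOVQEQ before the final +1, so
// BitLen64(0) = 0.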
func rewriteValueAMD64_OpBitLen8_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen8 x)
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = 1
		v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v2.AddArg(x)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
	// match: (Bswap32 x)
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
	// match: (Bswap64 x)
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCeil_0(v *Value) bool {
	// match: (Ceil x)
	// result: (ROUNDSD [2] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 2
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
	// match: (ClosureCall [argwid] entry closure mem)
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		mem := v.Args[2]
		entry := v.Args[0]
		closure := v.Args[1]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16_0(v *Value) bool {
	// match: (Com16 x)
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32_0(v *Value) bool {
	// match: (Com32 x)
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64_0(v *Value) bool {
	// match: (Com64 x)
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8_0(v *Value) bool {
	// match: (Com8 x)
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
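// For intuition: CondSelect <t> x y c becomes a CMOV whose width is chosen by
// t and whose condition is taken from the SETcc that produced c; each rule
// below strips the SETcc and feeds its flags straight into the CMOV, e.g.
// (CondSelect <int64> x y (SETL f)) => (CMOVQLT y x f). The 64-, 32- and
// 16-bit variants use the Q, L and W forms respectively.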
func rewriteValueAMD64_OpCondSelect_0(v *Value) bool {
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQ y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQHI y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQHI)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCS y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCS)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCC y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCC)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLS y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLS)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCondSelect_10(v *Value) bool {
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNEF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNEF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGTF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGTF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGEF y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGEF)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQ y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGT y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGE y x cond)
	for {
		t := v.Type
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGE)
		v.AddArg(y)
		v.AddArg(x)
		v.AddArg(cond)
		return true
	}
	return false
}
v.Args[0] 51100 y := v.Args[1] 51101 v_2 := v.Args[2] 51102 if v_2.Op != OpAMD64SETEQF { 51103 break 51104 } 51105 cond := v_2.Args[0] 51106 if !(is32BitInt(t)) { 51107 break 51108 } 51109 v.reset(OpAMD64CMOVLEQF) 51110 v.AddArg(y) 51111 v.AddArg(x) 51112 v.AddArg(cond) 51113 return true 51114 } 51115 // match: (CondSelect <t> x y (SETNEF cond)) 51116 // cond: is32BitInt(t) 51117 // result: (CMOVLNEF y x cond) 51118 for { 51119 t := v.Type 51120 _ = v.Args[2] 51121 x := v.Args[0] 51122 y := v.Args[1] 51123 v_2 := v.Args[2] 51124 if v_2.Op != OpAMD64SETNEF { 51125 break 51126 } 51127 cond := v_2.Args[0] 51128 if !(is32BitInt(t)) { 51129 break 51130 } 51131 v.reset(OpAMD64CMOVLNEF) 51132 v.AddArg(y) 51133 v.AddArg(x) 51134 v.AddArg(cond) 51135 return true 51136 } 51137 // match: (CondSelect <t> x y (SETGF cond)) 51138 // cond: is32BitInt(t) 51139 // result: (CMOVLGTF y x cond) 51140 for { 51141 t := v.Type 51142 _ = v.Args[2] 51143 x := v.Args[0] 51144 y := v.Args[1] 51145 v_2 := v.Args[2] 51146 if v_2.Op != OpAMD64SETGF { 51147 break 51148 } 51149 cond := v_2.Args[0] 51150 if !(is32BitInt(t)) { 51151 break 51152 } 51153 v.reset(OpAMD64CMOVLGTF) 51154 v.AddArg(y) 51155 v.AddArg(x) 51156 v.AddArg(cond) 51157 return true 51158 } 51159 // match: (CondSelect <t> x y (SETGEF cond)) 51160 // cond: is32BitInt(t) 51161 // result: (CMOVLGEF y x cond) 51162 for { 51163 t := v.Type 51164 _ = v.Args[2] 51165 x := v.Args[0] 51166 y := v.Args[1] 51167 v_2 := v.Args[2] 51168 if v_2.Op != OpAMD64SETGEF { 51169 break 51170 } 51171 cond := v_2.Args[0] 51172 if !(is32BitInt(t)) { 51173 break 51174 } 51175 v.reset(OpAMD64CMOVLGEF) 51176 v.AddArg(y) 51177 v.AddArg(x) 51178 v.AddArg(cond) 51179 return true 51180 } 51181 // match: (CondSelect <t> x y (SETEQ cond)) 51182 // cond: is16BitInt(t) 51183 // result: (CMOVWEQ y x cond) 51184 for { 51185 t := v.Type 51186 _ = v.Args[2] 51187 x := v.Args[0] 51188 y := v.Args[1] 51189 v_2 := v.Args[2] 51190 if v_2.Op != OpAMD64SETEQ { 51191 break 51192 } 51193 cond := v_2.Args[0] 51194 if !(is16BitInt(t)) { 51195 break 51196 } 51197 v.reset(OpAMD64CMOVWEQ) 51198 v.AddArg(y) 51199 v.AddArg(x) 51200 v.AddArg(cond) 51201 return true 51202 } 51203 // match: (CondSelect <t> x y (SETNE cond)) 51204 // cond: is16BitInt(t) 51205 // result: (CMOVWNE y x cond) 51206 for { 51207 t := v.Type 51208 _ = v.Args[2] 51209 x := v.Args[0] 51210 y := v.Args[1] 51211 v_2 := v.Args[2] 51212 if v_2.Op != OpAMD64SETNE { 51213 break 51214 } 51215 cond := v_2.Args[0] 51216 if !(is16BitInt(t)) { 51217 break 51218 } 51219 v.reset(OpAMD64CMOVWNE) 51220 v.AddArg(y) 51221 v.AddArg(x) 51222 v.AddArg(cond) 51223 return true 51224 } 51225 return false 51226 } 51227 func rewriteValueAMD64_OpCondSelect_30(v *Value) bool { 51228 // match: (CondSelect <t> x y (SETL cond)) 51229 // cond: is16BitInt(t) 51230 // result: (CMOVWLT y x cond) 51231 for { 51232 t := v.Type 51233 _ = v.Args[2] 51234 x := v.Args[0] 51235 y := v.Args[1] 51236 v_2 := v.Args[2] 51237 if v_2.Op != OpAMD64SETL { 51238 break 51239 } 51240 cond := v_2.Args[0] 51241 if !(is16BitInt(t)) { 51242 break 51243 } 51244 v.reset(OpAMD64CMOVWLT) 51245 v.AddArg(y) 51246 v.AddArg(x) 51247 v.AddArg(cond) 51248 return true 51249 } 51250 // match: (CondSelect <t> x y (SETG cond)) 51251 // cond: is16BitInt(t) 51252 // result: (CMOVWGT y x cond) 51253 for { 51254 t := v.Type 51255 _ = v.Args[2] 51256 x := v.Args[0] 51257 y := v.Args[1] 51258 v_2 := v.Args[2] 51259 if v_2.Op != OpAMD64SETG { 51260 break 51261 } 51262 cond := v_2.Args[0] 51263 if !(is16BitInt(t)) 
{ 51264 break 51265 } 51266 v.reset(OpAMD64CMOVWGT) 51267 v.AddArg(y) 51268 v.AddArg(x) 51269 v.AddArg(cond) 51270 return true 51271 } 51272 // match: (CondSelect <t> x y (SETLE cond)) 51273 // cond: is16BitInt(t) 51274 // result: (CMOVWLE y x cond) 51275 for { 51276 t := v.Type 51277 _ = v.Args[2] 51278 x := v.Args[0] 51279 y := v.Args[1] 51280 v_2 := v.Args[2] 51281 if v_2.Op != OpAMD64SETLE { 51282 break 51283 } 51284 cond := v_2.Args[0] 51285 if !(is16BitInt(t)) { 51286 break 51287 } 51288 v.reset(OpAMD64CMOVWLE) 51289 v.AddArg(y) 51290 v.AddArg(x) 51291 v.AddArg(cond) 51292 return true 51293 } 51294 // match: (CondSelect <t> x y (SETGE cond)) 51295 // cond: is16BitInt(t) 51296 // result: (CMOVWGE y x cond) 51297 for { 51298 t := v.Type 51299 _ = v.Args[2] 51300 x := v.Args[0] 51301 y := v.Args[1] 51302 v_2 := v.Args[2] 51303 if v_2.Op != OpAMD64SETGE { 51304 break 51305 } 51306 cond := v_2.Args[0] 51307 if !(is16BitInt(t)) { 51308 break 51309 } 51310 v.reset(OpAMD64CMOVWGE) 51311 v.AddArg(y) 51312 v.AddArg(x) 51313 v.AddArg(cond) 51314 return true 51315 } 51316 // match: (CondSelect <t> x y (SETA cond)) 51317 // cond: is16BitInt(t) 51318 // result: (CMOVWHI y x cond) 51319 for { 51320 t := v.Type 51321 _ = v.Args[2] 51322 x := v.Args[0] 51323 y := v.Args[1] 51324 v_2 := v.Args[2] 51325 if v_2.Op != OpAMD64SETA { 51326 break 51327 } 51328 cond := v_2.Args[0] 51329 if !(is16BitInt(t)) { 51330 break 51331 } 51332 v.reset(OpAMD64CMOVWHI) 51333 v.AddArg(y) 51334 v.AddArg(x) 51335 v.AddArg(cond) 51336 return true 51337 } 51338 // match: (CondSelect <t> x y (SETB cond)) 51339 // cond: is16BitInt(t) 51340 // result: (CMOVWCS y x cond) 51341 for { 51342 t := v.Type 51343 _ = v.Args[2] 51344 x := v.Args[0] 51345 y := v.Args[1] 51346 v_2 := v.Args[2] 51347 if v_2.Op != OpAMD64SETB { 51348 break 51349 } 51350 cond := v_2.Args[0] 51351 if !(is16BitInt(t)) { 51352 break 51353 } 51354 v.reset(OpAMD64CMOVWCS) 51355 v.AddArg(y) 51356 v.AddArg(x) 51357 v.AddArg(cond) 51358 return true 51359 } 51360 // match: (CondSelect <t> x y (SETAE cond)) 51361 // cond: is16BitInt(t) 51362 // result: (CMOVWCC y x cond) 51363 for { 51364 t := v.Type 51365 _ = v.Args[2] 51366 x := v.Args[0] 51367 y := v.Args[1] 51368 v_2 := v.Args[2] 51369 if v_2.Op != OpAMD64SETAE { 51370 break 51371 } 51372 cond := v_2.Args[0] 51373 if !(is16BitInt(t)) { 51374 break 51375 } 51376 v.reset(OpAMD64CMOVWCC) 51377 v.AddArg(y) 51378 v.AddArg(x) 51379 v.AddArg(cond) 51380 return true 51381 } 51382 // match: (CondSelect <t> x y (SETBE cond)) 51383 // cond: is16BitInt(t) 51384 // result: (CMOVWLS y x cond) 51385 for { 51386 t := v.Type 51387 _ = v.Args[2] 51388 x := v.Args[0] 51389 y := v.Args[1] 51390 v_2 := v.Args[2] 51391 if v_2.Op != OpAMD64SETBE { 51392 break 51393 } 51394 cond := v_2.Args[0] 51395 if !(is16BitInt(t)) { 51396 break 51397 } 51398 v.reset(OpAMD64CMOVWLS) 51399 v.AddArg(y) 51400 v.AddArg(x) 51401 v.AddArg(cond) 51402 return true 51403 } 51404 // match: (CondSelect <t> x y (SETEQF cond)) 51405 // cond: is16BitInt(t) 51406 // result: (CMOVWEQF y x cond) 51407 for { 51408 t := v.Type 51409 _ = v.Args[2] 51410 x := v.Args[0] 51411 y := v.Args[1] 51412 v_2 := v.Args[2] 51413 if v_2.Op != OpAMD64SETEQF { 51414 break 51415 } 51416 cond := v_2.Args[0] 51417 if !(is16BitInt(t)) { 51418 break 51419 } 51420 v.reset(OpAMD64CMOVWEQF) 51421 v.AddArg(y) 51422 v.AddArg(x) 51423 v.AddArg(cond) 51424 return true 51425 } 51426 // match: (CondSelect <t> x y (SETNEF cond)) 51427 // cond: is16BitInt(t) 51428 // result: (CMOVWNEF y x cond) 51429 
for { 51430 t := v.Type 51431 _ = v.Args[2] 51432 x := v.Args[0] 51433 y := v.Args[1] 51434 v_2 := v.Args[2] 51435 if v_2.Op != OpAMD64SETNEF { 51436 break 51437 } 51438 cond := v_2.Args[0] 51439 if !(is16BitInt(t)) { 51440 break 51441 } 51442 v.reset(OpAMD64CMOVWNEF) 51443 v.AddArg(y) 51444 v.AddArg(x) 51445 v.AddArg(cond) 51446 return true 51447 } 51448 return false 51449 } 51450 func rewriteValueAMD64_OpCondSelect_40(v *Value) bool { 51451 b := v.Block 51452 typ := &b.Func.Config.Types 51453 // match: (CondSelect <t> x y (SETGF cond)) 51454 // cond: is16BitInt(t) 51455 // result: (CMOVWGTF y x cond) 51456 for { 51457 t := v.Type 51458 _ = v.Args[2] 51459 x := v.Args[0] 51460 y := v.Args[1] 51461 v_2 := v.Args[2] 51462 if v_2.Op != OpAMD64SETGF { 51463 break 51464 } 51465 cond := v_2.Args[0] 51466 if !(is16BitInt(t)) { 51467 break 51468 } 51469 v.reset(OpAMD64CMOVWGTF) 51470 v.AddArg(y) 51471 v.AddArg(x) 51472 v.AddArg(cond) 51473 return true 51474 } 51475 // match: (CondSelect <t> x y (SETGEF cond)) 51476 // cond: is16BitInt(t) 51477 // result: (CMOVWGEF y x cond) 51478 for { 51479 t := v.Type 51480 _ = v.Args[2] 51481 x := v.Args[0] 51482 y := v.Args[1] 51483 v_2 := v.Args[2] 51484 if v_2.Op != OpAMD64SETGEF { 51485 break 51486 } 51487 cond := v_2.Args[0] 51488 if !(is16BitInt(t)) { 51489 break 51490 } 51491 v.reset(OpAMD64CMOVWGEF) 51492 v.AddArg(y) 51493 v.AddArg(x) 51494 v.AddArg(cond) 51495 return true 51496 } 51497 // match: (CondSelect <t> x y check) 51498 // cond: !check.Type.IsFlags() && check.Type.Size() == 1 51499 // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check)) 51500 for { 51501 t := v.Type 51502 check := v.Args[2] 51503 x := v.Args[0] 51504 y := v.Args[1] 51505 if !(!check.Type.IsFlags() && check.Type.Size() == 1) { 51506 break 51507 } 51508 v.reset(OpCondSelect) 51509 v.Type = t 51510 v.AddArg(x) 51511 v.AddArg(y) 51512 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) 51513 v0.AddArg(check) 51514 v.AddArg(v0) 51515 return true 51516 } 51517 // match: (CondSelect <t> x y check) 51518 // cond: !check.Type.IsFlags() && check.Type.Size() == 2 51519 // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check)) 51520 for { 51521 t := v.Type 51522 check := v.Args[2] 51523 x := v.Args[0] 51524 y := v.Args[1] 51525 if !(!check.Type.IsFlags() && check.Type.Size() == 2) { 51526 break 51527 } 51528 v.reset(OpCondSelect) 51529 v.Type = t 51530 v.AddArg(x) 51531 v.AddArg(y) 51532 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) 51533 v0.AddArg(check) 51534 v.AddArg(v0) 51535 return true 51536 } 51537 // match: (CondSelect <t> x y check) 51538 // cond: !check.Type.IsFlags() && check.Type.Size() == 4 51539 // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check)) 51540 for { 51541 t := v.Type 51542 check := v.Args[2] 51543 x := v.Args[0] 51544 y := v.Args[1] 51545 if !(!check.Type.IsFlags() && check.Type.Size() == 4) { 51546 break 51547 } 51548 v.reset(OpCondSelect) 51549 v.Type = t 51550 v.AddArg(x) 51551 v.AddArg(y) 51552 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) 51553 v0.AddArg(check) 51554 v.AddArg(v0) 51555 return true 51556 } 51557 // match: (CondSelect <t> x y check) 51558 // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) 51559 // result: (CMOVQNE y x (CMPQconst [0] check)) 51560 for { 51561 t := v.Type 51562 check := v.Args[2] 51563 x := v.Args[0] 51564 y := v.Args[1] 51565 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { 51566 break 51567 } 51568 v.reset(OpAMD64CMOVQNE) 
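// The OpCondSelect rules above turn (CondSelect x y flagsOp) into the matching
// CMOV: the flag-setting op picks the condition code, the width of t picks the
// CMOVQ/CMOVL/CMOVW family, and y is listed before x because these CMOV ops
// yield their second argument when the condition holds and their first when it
// does not. A sketch (names not from this file) of Go source that can reach
// this lowering:
//
//	func pick(c bool, x, y int64) int64 {
//		r := y
//		if c {
//			r = x // simple diamond; may lower to a CMOVQ* instead of a branch
//		}
//		return r
//	}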
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
	// match: (Const16 [val])
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32_0(v *Value) bool {
	// match: (Const32 [val])
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
	// match: (Const32F [val])
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64_0(v *Value) bool {
	// match: (Const64 [val])
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
	// match: (Const64F [val])
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8_0(v *Value) bool {
	// match: (Const8 [val])
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
	// match: (ConstBool [b])
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
	// match: (ConstNil)
	// result: (MOVQconst [0])
	for {
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
}
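// Const8, Const16, Const32, and ConstBool all materialize through MOVLconst;
// only Const64 needs MOVQconst, and ConstNil is just MOVQconst [0]. A 32-bit
// move suffices for the narrow types on this backend because their consumers
// only read the low bits. Illustrative only:
//
//	var c8 int8 = 7         // (Const8 [7])   -> (MOVLconst [7])
//	var c64 int64 = 1 << 40 // (Const64 [...]) -> (MOVQconst [...])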
func rewriteValueAMD64_OpCtz16_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz16 x)
	// result: (BSFL (BTSLconst <typ.UInt32> [16] x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16NonZero_0(v *Value) bool {
	// match: (Ctz16NonZero x)
	// result: (BSFL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz32 x)
	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
		v1.AuxInt = 32
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz32NonZero_0(v *Value) bool {
	// match: (Ctz32NonZero x)
	// result: (BSFL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64 <t> x)
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
func rewriteValueAMD64_OpCtz64NonZero_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64NonZero x)
	// result: (Select0 (BSFQ x))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz8_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz8 x)
	// result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz8NonZero_0(v *Value) bool {
	// match: (Ctz8NonZero x)
	// result: (BSFL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
	// match: (Cvt32Fto32 x)
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
	// match: (Cvt32Fto64 x)
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
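// BSF is undefined on a zero source, so Ctz16/Ctz8 first set a bit just above
// the operand width (BTSLconst [16] / [ 8]); a zero input then reports 16 or 8,
// the correct trailing-zero count, and Ctz32 plays the same trick in 64-bit
// registers with BTSQconst [32]. Ctz64 has no spare bit and instead selects the
// constant 64 with CMOVQEQ when BSFQ's flags report a zero input. The same idea
// in portable Go (sketch, names not from this file):
//
//	import "math/bits"
//
//	func ctz16(x uint16) int {
//		u := uint32(x) | 1<<16 // force a set bit; zero input maps to bit 16
//		return bits.TrailingZeros32(u)
//	}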
func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
	// match: (Cvt32Fto64F x)
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
	// match: (Cvt32to32F x)
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
	// match: (Cvt32to64F x)
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
	// match: (Cvt64Fto32 x)
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
	// match: (Cvt64Fto32F x)
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
	// match: (Cvt64Fto64 x)
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
	// match: (Cvt64to32F x)
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
	// match: (Cvt64to64F x)
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// result: (DIVQU2 xhi xlo y)
	for {
		y := v.Args[2]
		xhi := v.Args[0]
		xlo := v.Args[1]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16 [a] x y)
	// result: (Select0 (DIVW [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16u x y)
	// result: (Select0 (DIVWU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
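// The conversions split into truncating float-to-int forms (the CVTT* ops,
// matching Go's round-toward-zero semantics for float-to-int conversion) and
// the plain CVTS* moves for int-to-float and float-to-float. Illustrative only:
//
//	f := -2.7
//	i := int32(f) // i == -2: truncation toward zero, CVTTSD2SL on amd64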
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32 [a] x y)
	// result: (Select0 (DIVL [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
	// match: (Div32F x y)
	// result: (DIVSS x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32u x y)
	// result: (Select0 (DIVLU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64 [a] x y)
	// result: (Select0 (DIVQ [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
	// match: (Div64F x y)
	// result: (DIVSD x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64u x y)
	// result: (Select0 (DIVQU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8 x y)
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8u x y)
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16_0(v *Value) bool {
	b := v.Block
	// match: (Eq16 x y)
	// result: (SETEQ (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
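// The integer divides all return a (quotient, remainder) tuple, so the Div*
// rules take Select0 here while the Mod* rules near the end of this section
// take Select1 of the same DIV op. Div8/Div8u have no byte-wide tuple op on
// this port and are widened to 16 bits via SignExt8to16/ZeroExt8to16 first.
// Illustrative only:
//
//	var x, y int8 = 100, 7
//	q, r := x/y, x%y // one widened DIVW can supply both: q == 14, r == 2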
func rewriteValueAMD64_OpEq32_0(v *Value) bool {
	b := v.Block
	// match: (Eq32 x y)
	// result: (SETEQ (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
	b := v.Block
	// match: (Eq32F x y)
	// result: (SETEQF (UCOMISS x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64_0(v *Value) bool {
	b := v.Block
	// match: (Eq64 x y)
	// result: (SETEQ (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
	b := v.Block
	// match: (Eq64F x y)
	// result: (SETEQF (UCOMISD x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8_0(v *Value) bool {
	b := v.Block
	// match: (Eq8 x y)
	// result: (SETEQ (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB_0(v *Value) bool {
	b := v.Block
	// match: (EqB x y)
	// result: (SETEQ (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
	b := v.Block
	// match: (EqPtr x y)
	// result: (SETEQ (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpFMA_0(v *Value) bool {
	// match: (FMA x y z)
	// result: (VFMADD231SD z x y)
	for {
		z := v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64VFMADD231SD)
		v.AddArg(z)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpFloor_0(v *Value) bool {
	// match: (Floor x)
	// result: (ROUNDSD [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	// match: (Geq16 x y)
	// result: (SETGE (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
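// Note on the (FMA x y z) rule above: math.FMA(x, y, z) computes x*y+z with a
// single rounding, and VFMADD231SD computes dst = arg1*arg2 + dst, so the
// rewrite passes (z x y) to seed the destination with z. Illustrative only:
//
//	r := math.FMA(a, b, c) // a*b + c with one rounding; VFMADD231SD here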
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	// match: (Geq16U x y)
	// result: (SETAE (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	// match: (Geq32 x y)
	// result: (SETGE (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	// match: (Geq32F x y)
	// result: (SETGEF (UCOMISS x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	// match: (Geq32U x y)
	// result: (SETAE (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	// match: (Geq64 x y)
	// result: (SETGE (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	// match: (Geq64F x y)
	// result: (SETGEF (UCOMISD x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	// match: (Geq64U x y)
	// result: (SETAE (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	// match: (Geq8 x y)
	// result: (SETGE (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	// match: (Geq8U x y)
	// result: (SETAE (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
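// The Geq rules show the signed/unsigned split that runs through all of the
// comparison lowerings here: both forms emit the same CMP, and only the
// condition code read from the flags differs (SETGE for signed, SETAE for
// unsigned). Illustrative only:
//
//	func ge(a, b int32) bool   { return a >= b } // SETGE (CMPL a b)
//	func geU(a, b uint32) bool { return a >= b } // SETAE (CMPL a b)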
func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool {
	// match: (GetCallerPC)
	// result: (LoweredGetCallerPC)
	for {
		v.reset(OpAMD64LoweredGetCallerPC)
		return true
	}
}
func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool {
	// match: (GetCallerSP)
	// result: (LoweredGetCallerSP)
	for {
		v.reset(OpAMD64LoweredGetCallerSP)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	// match: (Greater16 x y)
	// result: (SETG (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	// match: (Greater16U x y)
	// result: (SETA (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	// match: (Greater32 x y)
	// result: (SETG (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	// match: (Greater32F x y)
	// result: (SETGF (UCOMISS x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	// match: (Greater32U x y)
	// result: (SETA (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	// match: (Greater64 x y)
	// result: (SETG (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	// match: (Greater64F x y)
	// result: (SETGF (UCOMISD x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	// match: (Greater64U x y)
	// result: (SETA (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	// match: (Greater8 x y)
	// result: (SETG (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	// match: (Greater8U x y)
	// result: (SETA (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// result: (HMULL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// result: (HMULLU x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// result: (HMULQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// result: (HMULQU x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		mem := v.Args[1]
		entry := v.Args[0]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	// match: (IsInBounds idx len)
	// result: (SETB (CMPQ idx len))
	for {
		len := v.Args[1]
		idx := v.Args[0]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	// match: (IsNonNil p)
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
}
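// IsInBounds folds 0 <= idx && idx < len into the single unsigned compare
// SETB (CMPQ idx len): a negative idx wraps to a huge unsigned value and fails
// the same test. IsSliceInBounds, just below, uses SETBE instead because
// idx == len is legal when slicing. The portable form of the trick:
//
//	func inBounds(idx, length int) bool {
//		return uint(idx) < uint(length) // one compare covers both conditions
//	}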
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	// match: (IsSliceInBounds idx len)
	// result: (SETBE (CMPQ idx len))
	for {
		len := v.Args[1]
		idx := v.Args[0]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	// match: (Leq16 x y)
	// result: (SETLE (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	// match: (Leq16U x y)
	// result: (SETBE (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	// match: (Leq32 x y)
	// result: (SETLE (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	// match: (Leq32F x y)
	// result: (SETGEF (UCOMISS y x))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	// match: (Leq32U x y)
	// result: (SETBE (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	// match: (Leq64 x y)
	// result: (SETLE (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	// match: (Leq64F x y)
	// result: (SETGEF (UCOMISD y x))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	// match: (Leq64U x y)
	// result: (SETBE (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	// match: (Leq8 x y)
	// result: (SETLE (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	// match: (Leq8U x y)
	// result: (SETBE (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	// match: (Less16 x y)
	// result: (SETL (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	// match: (Less16U x y)
	// result: (SETB (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	// match: (Less32 x y)
	// result: (SETL (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	// match: (Less32F x y)
	// result: (SETGF (UCOMISS y x))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	// match: (Less32U x y)
	// result: (SETB (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	// match: (Less64 x y)
	// result: (SETL (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	// match: (Less64F x y)
	// result: (SETGF (UCOMISD y x))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	// match: (Less64U x y)
	// result: (SETB (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
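// Less*F and Leq*F swap their operands and test with SETGF/SETGEF: after
// UCOMISS/UCOMISD, "greater" is the directly usable outcome, so x < y is
// lowered as y > x and x <= y as y >= x. Swapping, rather than negating an
// x >= y test, keeps comparisons involving NaN false as Go requires:
//
//	nan := math.NaN()
//	_ = nan < 1.0 // must be false; lowering it as !(nan >= 1.0) would give true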
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	// match: (Less8 x y)
	// result: (SETL (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	// match: (Less8U x y)
	// result: (SETB (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitInt(t)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLocalAddr_0(v *Value) bool {
	// match: (LocalAddr {sym} base _)
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		_ = v.Args[1]
		base := v.Args[0]
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
}
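// The OpLoad rules above are a type-indexed table: pointer-sized integers and
// pointers take MOVQload, then MOVLload/MOVWload/MOVBload by decreasing width
// (bools share MOVBload), and floats take MOVSSload/MOVSDload; exactly one
// condition holds for any scalar type. Illustrative only:
//
//	var p *int16
//	x := *p // (Load <int16> p mem) -> (MOVWload p mem)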
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
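// Mod* lowers to the corresponding DIV* instruction: on AMD64 one division
// produces quotient and remainder together as a tuple, and Select1 picks out
// the remainder (the Div* rules select element 0). The 8-bit variants first
// extend to 16 bits because the tuple has no separate 8-bit remainder form.
// In effect both of
//
//	q, r := x/y, x%y
//
// come from a single DIVW/DIVL/DIVQ.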
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16 [a] x y)
	// result: (Select1 (DIVW [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16u x y)
	// result: (Select1 (DIVWU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32 [a] x y)
	// result: (Select1 (DIVL [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32u x y)
	// result: (Select1 (DIVLU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64 [a] x y)
	// result: (Select1 (DIVQ [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64u x y)
	// result: (Select1 (DIVQU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8 x y)
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8u x y)
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
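// Move lowering picks a strategy by size: tiny moves become single load/store
// pairs, sizes up to a few words become short sequences (possibly
// overlapping), mid-sized 16-byte-aligned copies use MOVO (SSE) or Duff's
// device, and very large or awkward sizes fall back to REPMOVSQ. The rules
// are split across OpMove_0, _10 and _20 only because the generator emits at
// most ten rules per function.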
func rewriteValueAMD64_OpMove_0(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Move [0] _ _ mem)
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: config.useSSE
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: !config.useSSE
	// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [32] dst src mem)
	// result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpMove)
		v.AuxInt = 16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = 16
		v2.AddArg(dst)
		v2.AddArg(src)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [48] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if v.AuxInt != 48 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = 32
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = 16
		v2.AddArg(dst)
		v2.AddArg(src)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [64] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
	for {
		if v.AuxInt != 64 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = 32
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = 32
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = 32
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = 32
		v2.AddArg(dst)
		v2.AddArg(src)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	return false
}
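// Sizes that are not a whole number of words are handled below by two
// possibly-overlapping accesses rather than several differently-sized ones:
// e.g. Move [7] copies bytes [3,7) and then [0,4) with two 4-byte operations,
// rewriting byte 3, and the s == 11 || 13 <= s <= 15 rule does the same with
// two 8-byte operations at offsets s-8 and 0.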
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Move [3] dst src mem)
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [9] dst src mem)
	// result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 9 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [10] dst src mem)
	// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 10 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [12] dst src mem)
	// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 12 {
			break
		}
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s == 11 || s >= 13 && s <= 15
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(s == 11 || s >= 13 && s <= 15) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	return false
}
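// For large copies, the cutoffs below are: 16-byte multiples in (64, 1024]
// use DUFFCOPY, a jump into an unrolled copy loop, with the AuxInt
// 14*(64-s/16) selecting the entry point so that exactly s bytes are copied;
// anything bigger, or any multiple of 8 when Duff's device is disabled, uses
// REPMOVSQ with the quadword count s/8 materialized in a register.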
func rewriteValueAMD64_OpMove_20(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		mem := v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
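// Multiplies need no width-specific instruction below 32 bits: Mul8 and
// Mul16 use MULL because the low 8/16 bits of a 32-bit multiply are
// identical to those of a narrower multiply.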
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// result: (MULL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// result: (MULL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// result: (MULSS x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// result: (MULQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// result: (MULSD x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// result: (MULQU2 x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// result: (MULL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
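// Integer negation below is a plain NEG; floating-point negation has no
// dedicated instruction and is done by XORing the sign bit. The constant is
// -0.0, i.e. math.Copysign(0, -1), so PXOR flips only the sign bit and gets
// signed zeros right, unlike 0-x (which would turn +0 into +0). A sketch of
// the 64-bit case:
//
//	negZero := math.Float64bits(math.Copysign(0, -1)) // 0x8000000000000000
//	neg := math.Float64frombits(math.Float64bits(x) ^ negZero)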
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg32F x)
	// result: (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg64F x)
	// result: (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
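// Neq* lowers to a CMP/UCOMIS followed by a flags-to-bool SETcc. The
// floating-point versions use SETNEF rather than SETNE because x86 compares
// report NaN operands as "unordered": NaN != NaN must be true, so the
// lowered condition also accepts the parity flag that an unordered
// UCOMISS/UCOMISD result sets.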
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	// match: (Neq16 x y)
	// result: (SETNE (CMPW x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	// match: (Neq32 x y)
	// result: (SETNE (CMPL x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	// match: (Neq32F x y)
	// result: (SETNEF (UCOMISS x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	// match: (Neq64 x y)
	// result: (SETNE (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	// match: (Neq64F x y)
	// result: (SETNEF (UCOMISD x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	// match: (Neq8 x y)
	// result: (SETNE (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	// match: (NeqB x y)
	// result: (SETNE (CMPB x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	// match: (NeqPtr x y)
	// result: (SETNE (CMPQ x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// result: (LoweredNilCheck ptr mem)
	for {
		mem := v.Args[1]
		ptr := v.Args[0]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
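// OffPtr folds a pointer offset into an ADDQconst when the offset fits in a
// signed 32-bit immediate (is32Bit); otherwise the constant must first be
// materialized in a register with MOVQconst, since x86-64 ADD has no 64-bit
// immediate form.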
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (OffPtr [off] ptr)
	// cond: is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// result: (ORL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// result: (ORL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// result: (ORQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// result: (ORL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// result: (ORL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpPanicBounds_0(v *Value) bool {
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicBoundsA [kind] x y mem)
	for {
		kind := v.AuxInt
		mem := v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsA)
		v.AuxInt = kind
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicBoundsB [kind] x y mem)
	for {
		kind := v.AuxInt
		mem := v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsB)
		v.AuxInt = kind
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicBoundsC [kind] x y mem)
	for {
		kind := v.AuxInt
		mem := v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsC)
		v.AuxInt = kind
		v.AddArg(x)
		v.AddArg(y)
		v.AddArg(mem)
		return true
	}
	return false
}
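// PopCount8/16 zero-extend into a 32-bit register and use POPCNTL: there is
// no byte-sized POPCNT, and working on the full register also sidesteps the
// false output dependency POPCNT has on some CPUs. A sketch of the 16-bit
// case:
//
//	bits.OnesCount32(uint32(uint16(x))) // == bits.OnesCount16(x)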
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount16 x)
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount8 x)
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft16_0(v *Value) bool {
	// match: (RotateLeft16 a b)
	// result: (ROLW a b)
	for {
		b := v.Args[1]
		a := v.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft32_0(v *Value) bool {
	// match: (RotateLeft32 a b)
	// result: (ROLL a b)
	for {
		b := v.Args[1]
		a := v.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft64_0(v *Value) bool {
	// match: (RotateLeft64 a b)
	// result: (ROLQ a b)
	for {
		b := v.Args[1]
		a := v.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRotateLeft8_0(v *Value) bool {
	// match: (RotateLeft8 a b)
	// result: (ROLB a b)
	for {
		b := v.Args[1]
		a := v.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(a)
		v.AddArg(b)
		return true
	}
}
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool {
	// match: (RoundToEven x)
	// result: (ROUNDSD [0] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
}
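// The right-shift rules below mirror the left-shift ones, with one twist for
// the signed variants: an over-large signed shift must fill with the sign
// bit, not zero, so instead of masking the result the count is saturated.
// ORing y with NOT(SBB*carrymask(CMP y [width])) leaves y unchanged when it
// is in range and turns it into all ones when it is not, and a SAR by an
// all-ones count (masked by the hardware to 31 or 63) produces the sign
// fill.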
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh32Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh32Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh32Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh32Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh32Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh32Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh32Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh32Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh8Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh64Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh8Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh8Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh8Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Rsh8Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
	b := v.Block
	// match: (Rsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Rsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select0 (Mul64uover x y))
	// result: (Select0 <typ.UInt64> (MULQU x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
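	// The Add64carry and Sub64borrow rules below re-materialize the 0/1
	// carry input c as a CPU flag: NEGLflags negates c and sets the carry
	// flag exactly when c is nonzero, and Select1 projects those flags
	// out of the tuple so ADCQ/SBBQ can consume them.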
	// match: (Select0 (Mul32uover x y))
	// result: (Select0 <typ.UInt32> (MULLU x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt32
		v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Add64carry x y c))
	// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Sub64borrow x y c))
	// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select1 (Mul64uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v1.AddArg(y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Mul32uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v1.AddArg(x)
		v1.AddArg(y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Add64carry x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v2.AddArg(y)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Sub64borrow x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v2.AddArg(y)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (NEGLflags (MOVQconst [0])))
	// result: (FlagEQ)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
	// result: x
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		x := v_0_0_0.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
	// match: (SignExt16to32 x)
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
	// match: (SignExt16to64 x)
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
	// match: (SignExt32to64 x)
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
	// match: (SignExt8to16 x)
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
	// match: (SignExt8to32 x)
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
	// match: (SignExt8to64 x)
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSlicemask_0(v *Value) bool {
	b := v.Block
	// match: (Slicemask <t> x)
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64SARQconst)
		v.AuxInt = 63
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
	// match: (Sqrt x)
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpStore_0(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		if !(t.(*types.Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		if !(t.(*types.Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		if !(t.(*types.Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		if !(t.(*types.Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSub16_0(v *Value) bool {
	// match: (Sub16 x y)
	// result: (SUBL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32_0(v *Value) bool {
	// match: (Sub32 x y)
	// result: (SUBL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
	// match: (Sub32F x y)
	// result: (SUBSS x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64_0(v *Value) bool {
	// match: (Sub64 x y)
	// result: (SUBQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
	// match: (Sub64F x y)
	// result: (SUBSD x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8_0(v *Value) bool {
	// match: (Sub8 x y)
	// result: (SUBL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
	// match: (SubPtr x y)
	// result: (SUBQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpTrunc_0(v *Value) bool {
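	// The AuxInt of 3 set below selects ROUNDSD's round-toward-zero
	// (truncate) mode; the neighboring Floor, Ceil, and RoundToEven
	// lowerings use 1, 2, and 0 respectively. For example, Trunc(-1.5)
	// yields -1.0 under mode 3.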
	// match: (Trunc x)
	// result: (ROUNDSD [3] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 3
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
	// match: (Trunc16to8 x)
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
	// match: (Trunc32to16 x)
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
	// match: (Trunc32to8 x)
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
	// match: (Trunc64to16 x)
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
	// match: (Trunc64to32 x)
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
	// match: (Trunc64to8 x)
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpWB_0(v *Value) bool {
	// match: (WB {fn} destptr srcptr mem)
	// result: (LoweredWB {fn} destptr srcptr mem)
	for {
		fn := v.Aux
		mem := v.Args[2]
		destptr := v.Args[0]
		srcptr := v.Args[1]
		v.reset(OpAMD64LoweredWB)
		v.Aux = fn
		v.AddArg(destptr)
		v.AddArg(srcptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpXor16_0(v *Value) bool {
	// match: (Xor16 x y)
	// result: (XORL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32_0(v *Value) bool {
	// match: (Xor32 x y)
	// result: (XORL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64_0(v *Value) bool {
	// match: (Xor64 x y)
	// result: (XORQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8_0(v *Value) bool {
	// match: (Xor8 x y)
	// result: (XORL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpZero_0(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (Zero [0] _ mem)
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, s-8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
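	// The two rules below handle SSE zeroing of sizes that are not a
	// multiple of 16: they clear the ragged s%16-byte head with a single
	// wide store (a 16-byte MOVOstore when s%16 > 8, otherwise an 8-byte
	// MOVQstoreconst, overlapping the aligned region is fine) and recurse
	// on the remaining 16-byte-multiple length through OffPtr.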
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore destptr (MOVOconst [0]) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(destptr)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v3.AuxInt = 0
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
	for {
		if v.AuxInt != 48 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 32
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
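		// Two more chained MOVOstore values (v2 and v5, built next)
		// zero the 16-byte chunks at offsets 16 and 0; each store
		// threads the previous one through its memory argument.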
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 16
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v5.AddArg(destptr)
		v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v6.AuxInt = 0
		v5.AddArg(v6)
		v5.AddArg(mem)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
	for {
		if v.AuxInt != 64 {
			break
		}
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 48
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 32
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v6.AuxInt = 16
		v6.AddArg(destptr)
		v5.AddArg(v6)
		v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v7.AuxInt = 0
		v5.AddArg(v7)
		v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v8.AddArg(destptr)
		v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v9.AuxInt = 0
		v8.AddArg(v9)
		v8.AddArg(mem)
		v5.AddArg(v8)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZero_20(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		mem := v.Args[1]
		destptr := v.Args[0]
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		mem := v.Args[1]
		destptr := v.Args[0]
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
	// match: (ZeroExt8to16 x)
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteBlockAMD64(b *Block) bool {
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// result: (UGE (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				break
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
		// result: (UGE (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			y := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SHLL {
				break
			}
			x := v_0_1.Args[1]
			v_0_1_0 := v_0_1.Args[0]
			if v_0_1_0.Op != OpAMD64MOVLconst || v_0_1_0.AuxInt != 1 {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// result: (UGE (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				break
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// result: (UGE (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			y := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SHLQ {
				break
			}
			x := v_0_1.Args[1]
			v_0_1_0 := v_0_1.Args[0]
			if v_0_1_0.Op != OpAMD64MOVQconst || v_0_1_0.AuxInt != 1 {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(c)
		// result: (UGE (BTLconst [log2uint32(c)] x))
		for b.Controls[0].Op == OpAMD64TESTLconst {
			v_0 := b.Controls[0]
			c := v_0.AuxInt
			x := v_0.Args[0]
			if !(isUint32PowerOfTwo(c)) {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2uint32(c)
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(c)
		// result: (UGE (BTQconst [log2(c)] x))
		for b.Controls[0].Op == OpAMD64TESTQconst {
			v_0 := b.Controls[0]
			c := v_0.AuxInt
			x := v_0.Args[0]
			if !(isUint64PowerOfTwo(c)) {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c)
		// result: (UGE (BTQconst [log2(c)] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0_0.AuxInt
			if !(isUint64PowerOfTwo(c)) {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTQ x (MOVQconst [c])))
		// cond: isUint64PowerOfTwo(c)
		// result: (UGE (BTQconst [log2(c)] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0_1.AuxInt
			if !(isUint64PowerOfTwo(c)) {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2
		// result: (UGE (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			z2 := v_0.Args[1]
			z1 := v_0.Args[0]
			if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64UGE)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (EQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
		// cond: z1==z2
		// result: (UGE (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
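			// Commuted form of the rule above: TESTQ's operands can
			// appear in either order, so the generated matcher here
			// takes z1 from Args[1] instead of Args[0].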
57874 v_0 := b.Controls[0] 57875 _ = v_0.Args[1] 57876 z2 := v_0.Args[0] 57877 z1 := v_0.Args[1] 57878 if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 { 57879 break 57880 } 57881 z1_0 := z1.Args[0] 57882 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 { 57883 break 57884 } 57885 x := z1_0.Args[0] 57886 if !(z1 == z2) { 57887 break 57888 } 57889 b.Reset(BlockAMD64UGE) 57890 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) 57891 v0.AuxInt = 63 57892 v0.AddArg(x) 57893 b.AddControl(v0) 57894 return true 57895 } 57896 // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) 57897 // cond: z1==z2 57898 // result: (UGE (BTQconst [31] x)) 57899 for b.Controls[0].Op == OpAMD64TESTL { 57900 v_0 := b.Controls[0] 57901 z2 := v_0.Args[1] 57902 z1 := v_0.Args[0] 57903 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { 57904 break 57905 } 57906 z1_0 := z1.Args[0] 57907 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { 57908 break 57909 } 57910 x := z1_0.Args[0] 57911 if !(z1 == z2) { 57912 break 57913 } 57914 b.Reset(BlockAMD64UGE) 57915 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) 57916 v0.AuxInt = 31 57917 v0.AddArg(x) 57918 b.AddControl(v0) 57919 return true 57920 } 57921 // match: (EQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) 57922 // cond: z1==z2 57923 // result: (UGE (BTQconst [31] x)) 57924 for b.Controls[0].Op == OpAMD64TESTL { 57925 v_0 := b.Controls[0] 57926 _ = v_0.Args[1] 57927 z2 := v_0.Args[0] 57928 z1 := v_0.Args[1] 57929 if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 { 57930 break 57931 } 57932 z1_0 := z1.Args[0] 57933 if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 { 57934 break 57935 } 57936 x := z1_0.Args[0] 57937 if !(z1 == z2) { 57938 break 57939 } 57940 b.Reset(BlockAMD64UGE) 57941 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) 57942 v0.AuxInt = 31 57943 v0.AddArg(x) 57944 b.AddControl(v0) 57945 return true 57946 } 57947 // match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) 57948 // cond: z1==z2 57949 // result: (UGE (BTQconst [0] x)) 57950 for b.Controls[0].Op == OpAMD64TESTQ { 57951 v_0 := b.Controls[0] 57952 z2 := v_0.Args[1] 57953 z1 := v_0.Args[0] 57954 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 57955 break 57956 } 57957 z1_0 := z1.Args[0] 57958 if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { 57959 break 57960 } 57961 x := z1_0.Args[0] 57962 if !(z1 == z2) { 57963 break 57964 } 57965 b.Reset(BlockAMD64UGE) 57966 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) 57967 v0.AuxInt = 0 57968 v0.AddArg(x) 57969 b.AddControl(v0) 57970 return true 57971 } 57972 // match: (EQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) 57973 // cond: z1==z2 57974 // result: (UGE (BTQconst [0] x)) 57975 for b.Controls[0].Op == OpAMD64TESTQ { 57976 v_0 := b.Controls[0] 57977 _ = v_0.Args[1] 57978 z2 := v_0.Args[0] 57979 z1 := v_0.Args[1] 57980 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 57981 break 57982 } 57983 z1_0 := z1.Args[0] 57984 if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 { 57985 break 57986 } 57987 x := z1_0.Args[0] 57988 if !(z1 == z2) { 57989 break 57990 } 57991 b.Reset(BlockAMD64UGE) 57992 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) 57993 v0.AuxInt = 0 57994 v0.AddArg(x) 57995 b.AddControl(v0) 57996 return true 57997 } 57998 // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) 57999 // cond: z1==z2 58000 // result: (UGE (BTLconst [0] x)) 58001 for b.Controls[0].Op == OpAMD64TESTL { 58002 v_0 := b.Controls[0] 58003 z2 := v_0.Args[1] 58004 
z1 := v_0.Args[0] 58005 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 58006 break 58007 } 58008 z1_0 := z1.Args[0] 58009 if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { 58010 break 58011 } 58012 x := z1_0.Args[0] 58013 if !(z1 == z2) { 58014 break 58015 } 58016 b.Reset(BlockAMD64UGE) 58017 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) 58018 v0.AuxInt = 0 58019 v0.AddArg(x) 58020 b.AddControl(v0) 58021 return true 58022 } 58023 // match: (EQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) 58024 // cond: z1==z2 58025 // result: (UGE (BTLconst [0] x)) 58026 for b.Controls[0].Op == OpAMD64TESTL { 58027 v_0 := b.Controls[0] 58028 _ = v_0.Args[1] 58029 z2 := v_0.Args[0] 58030 z1 := v_0.Args[1] 58031 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 58032 break 58033 } 58034 z1_0 := z1.Args[0] 58035 if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 { 58036 break 58037 } 58038 x := z1_0.Args[0] 58039 if !(z1 == z2) { 58040 break 58041 } 58042 b.Reset(BlockAMD64UGE) 58043 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) 58044 v0.AuxInt = 0 58045 v0.AddArg(x) 58046 b.AddControl(v0) 58047 return true 58048 } 58049 // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2)) 58050 // cond: z1==z2 58051 // result: (UGE (BTQconst [63] x)) 58052 for b.Controls[0].Op == OpAMD64TESTQ { 58053 v_0 := b.Controls[0] 58054 z2 := v_0.Args[1] 58055 z1 := v_0.Args[0] 58056 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 58057 break 58058 } 58059 x := z1.Args[0] 58060 if !(z1 == z2) { 58061 break 58062 } 58063 b.Reset(BlockAMD64UGE) 58064 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) 58065 v0.AuxInt = 63 58066 v0.AddArg(x) 58067 b.AddControl(v0) 58068 return true 58069 } 58070 // match: (EQ (TESTQ z2 z1:(SHRQconst [63] x))) 58071 // cond: z1==z2 58072 // result: (UGE (BTQconst [63] x)) 58073 for b.Controls[0].Op == OpAMD64TESTQ { 58074 v_0 := b.Controls[0] 58075 _ = v_0.Args[1] 58076 z2 := v_0.Args[0] 58077 z1 := v_0.Args[1] 58078 if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 { 58079 break 58080 } 58081 x := z1.Args[0] 58082 if !(z1 == z2) { 58083 break 58084 } 58085 b.Reset(BlockAMD64UGE) 58086 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) 58087 v0.AuxInt = 63 58088 v0.AddArg(x) 58089 b.AddControl(v0) 58090 return true 58091 } 58092 // match: (EQ (TESTL z1:(SHRLconst [31] x) z2)) 58093 // cond: z1==z2 58094 // result: (UGE (BTLconst [31] x)) 58095 for b.Controls[0].Op == OpAMD64TESTL { 58096 v_0 := b.Controls[0] 58097 z2 := v_0.Args[1] 58098 z1 := v_0.Args[0] 58099 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 58100 break 58101 } 58102 x := z1.Args[0] 58103 if !(z1 == z2) { 58104 break 58105 } 58106 b.Reset(BlockAMD64UGE) 58107 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) 58108 v0.AuxInt = 31 58109 v0.AddArg(x) 58110 b.AddControl(v0) 58111 return true 58112 } 58113 // match: (EQ (TESTL z2 z1:(SHRLconst [31] x))) 58114 // cond: z1==z2 58115 // result: (UGE (BTLconst [31] x)) 58116 for b.Controls[0].Op == OpAMD64TESTL { 58117 v_0 := b.Controls[0] 58118 _ = v_0.Args[1] 58119 z2 := v_0.Args[0] 58120 z1 := v_0.Args[1] 58121 if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 { 58122 break 58123 } 58124 x := z1.Args[0] 58125 if !(z1 == z2) { 58126 break 58127 } 58128 b.Reset(BlockAMD64UGE) 58129 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) 58130 v0.AuxInt = 31 58131 v0.AddArg(x) 58132 b.AddControl(v0) 58133 return true 58134 } 58135 // match: (EQ (InvertFlags cmp) yes no) 58136 // result: (EQ cmp yes no) 58137 for 
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64LE)
			b.AddControl(cmp)
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64LT)
			b.AddControl(cmp)
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
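	// The BlockIf rules that follow lower the generic If block. When the
	// control value is a SETcc of some flags, the SETcc is peeled off and the
	// block becomes the matching flags-based block (SETL -> LT, SETB -> ULT,
	// SETGF -> UGT for the floating-point orderings, and so on). The final
	// catch-all rule handles an arbitrary boolean control by materializing
	// flags with (TESTB cond cond) and branching on NE.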
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETL {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64LT)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETLE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64LE)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETG cmp) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETG {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64GT)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64GE)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64SETEQ {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64EQ)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETNE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64NE)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETB cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETB {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64ULT)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETBE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64ULE)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETA cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETA {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64UGT)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETAE {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64UGE)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETO cmp) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64SETO {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64OS)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64UGT)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64SETGEF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64UGE)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64SETEQF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64EQF)
			b.AddControl(cmp)
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64SETNEF {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64NEF)
			b.AddControl(cmp)
			return true
		}
		// match: (If cond yes no)
		// result: (NE (TESTB cond cond) yes no)
		for {
			cond := b.Controls[0]
			b.Reset(BlockAMD64NE)
			v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.AddControl(v0)
			return true
		}
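	// Illustrative example (not part of the generated output): a Go branch
	// such as
	//
	//	if a < b { ... }
	//
	// typically reaches this pass as (If (SETL (CMPQ a b)) yes no) and leaves
	// as (LT (CMPQ a b) yes no), so the SETL never materializes a byte in the
	// final code; the comparison feeds the conditional jump directly.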
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64GE)
			b.AddControl(cmp)
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64GT)
			b.AddControl(cmp)
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64LT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// result: (LT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64LT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64LE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// result: (LE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64LE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64GT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// result: (GT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64GT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64GE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// result: (GE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64GE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64EQ)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64EQ)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64NE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64NE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64ULT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64ULT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64ULE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64ULE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64UGT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64UGT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64UGE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64UGE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64OS)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
		// result: (OS cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64OS)
			b.AddControl(cmp)
			return true
		}
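		// The rules below recognize single-bit tests and rewrite them to BT.
		// (TESTQ (SHLQ (MOVQconst [1]) x) y) tests bit x of y, so it becomes
		// (ULT (BTQ x y)): BT copies the selected bit into the carry flag,
		// and ULT branches exactly when carry is set. The shift-pair and
		// shift-only variants further down reduce sign-bit and masked-bit
		// tests to BTQconst and BTLconst with the appropriate bit index.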
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// result: (ULT (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				break
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
		// result: (ULT (BTL x y))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			y := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SHLL {
				break
			}
			x := v_0_1.Args[1]
			v_0_1_0 := v_0_1.Args[0]
			if v_0_1_0.Op != OpAMD64MOVLconst || v_0_1_0.AuxInt != 1 {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// result: (ULT (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				break
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// result: (ULT (BTQ x y))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			y := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SHLQ {
				break
			}
			x := v_0_1.Args[1]
			v_0_1_0 := v_0_1.Args[0]
			if v_0_1_0.Op != OpAMD64MOVQconst || v_0_1_0.AuxInt != 1 {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(c)
		// result: (ULT (BTLconst [log2uint32(c)] x))
		for b.Controls[0].Op == OpAMD64TESTLconst {
			v_0 := b.Controls[0]
			c := v_0.AuxInt
			x := v_0.Args[0]
			if !(isUint32PowerOfTwo(c)) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2uint32(c)
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(c)
		// result: (ULT (BTQconst [log2(c)] x))
		for b.Controls[0].Op == OpAMD64TESTQconst {
			v_0 := b.Controls[0]
			c := v_0.AuxInt
			x := v_0.Args[0]
			if !(isUint64PowerOfTwo(c)) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c)
		// result: (ULT (BTQconst [log2(c)] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			x := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0_0.AuxInt
			if !(isUint64PowerOfTwo(c)) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c])))
		// cond: isUint64PowerOfTwo(c)
		// result: (ULT (BTQconst [log2(c)] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			x := v_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0_1.AuxInt
			if !(isUint64PowerOfTwo(c)) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			z2 := v_0.Args[1]
			z1 := v_0.Args[0]
			if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			z2 := v_0.Args[0]
			z1 := v_0.Args[1]
			if z1.Op != OpAMD64SHLQconst || z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			z2 := v_0.Args[1]
			z1 := v_0.Args[0]
			if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
		// cond: z1==z2
		// result: (ULT (BTQconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			z2 := v_0.Args[0]
			z1 := v_0.Args[1]
			if z1.Op != OpAMD64SHLLconst || z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			z2 := v_0.Args[1]
			z1 := v_0.Args[0]
			if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
		// cond: z1==z2
		// result: (ULT (BTQconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			z2 := v_0.Args[0]
			z1 := v_0.Args[1]
			if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2
		// result: (ULT (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			z2 := v_0.Args[1]
			z1 := v_0.Args[0]
			if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
		// cond: z1==z2
		// result: (ULT (BTLconst [0] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			z2 := v_0.Args[0]
			z1 := v_0.Args[1]
			if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			z2 := v_0.Args[1]
			z1 := v_0.Args[0]
			if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHRQconst [63] x)))
		// cond: z1==z2
		// result: (ULT (BTQconst [63] x))
		for b.Controls[0].Op == OpAMD64TESTQ {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			z2 := v_0.Args[0]
			z1 := v_0.Args[1]
			if z1.Op != OpAMD64SHRQconst || z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2
		// result: (ULT (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			z2 := v_0.Args[1]
			z1 := v_0.Args[0]
			if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTL z2 z1:(SHRLconst [31] x)))
		// cond: z1==z2
		// result: (ULT (BTLconst [31] x))
		for b.Controls[0].Op == OpAMD64TESTL {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			z2 := v_0.Args[0]
			z1 := v_0.Args[1]
			if z1.Op != OpAMD64SHRLconst || z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2) {
				break
			}
			b.Reset(BlockAMD64ULT)
			v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.AddControl(v0)
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64UGT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64UGT)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64UGE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64UGE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64EQF)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// result: (EQF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64EQF)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64NEF)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// result: (NEF cmp yes no)
		for b.Controls[0].Op == OpAMD64TESTB {
			v_0 := b.Controls[0]
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0_0.Args[0]
			v_0_1 := v_0.Args[1]
			if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
				break
			}
			b.Reset(BlockAMD64NEF)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// result: (NE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64NE)
			b.AddControl(cmp)
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
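	// The unsigned cases below (UGE, UGT, ULE, ULT) mirror the signed ones
	// above: InvertFlags corresponds to swapping the comparison's operands,
	// so each condition maps to its mirror (UGE <-> ULE, UGT <-> ULT), and
	// known constant flags fold to First using only the unsigned half of the
	// FlagLT_*/FlagGT_* encoding.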
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// result: (ULE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64ULE)
			b.AddControl(cmp)
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// result: (ULT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64ULT)
			b.AddControl(cmp)
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// result: (UGE cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64UGE)
			b.AddControl(cmp)
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// result: (UGT cmp yes no)
		for b.Controls[0].Op == OpAMD64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.Reset(BlockAMD64UGT)
			b.AddControl(cmp)
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagEQ {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagLT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagLT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpAMD64FlagGT_ULT {
			b.Reset(BlockFirst)
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpAMD64FlagGT_UGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
	}
	return false
}
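// How these generated functions are driven (a sketch under assumptions, not
// part of this file): the SSA rewrite pass applies the block and value
// rewriters repeatedly until a full pass over the function changes nothing,
// roughly:
//
//	for changed := true; changed; {
//		changed = false
//		for _, b := range f.Blocks {
//			if rewriteBlockAMD64(b) {
//				changed = true
//			}
//			for _, v := range b.Values {
//				if rewriteValueAMD64(v) {
//					changed = true
//				}
//			}
//		}
//	}
//
// The real driver (applyRewrite in rewrite.go) additionally handles dead
// values and debug output; the loop above only shows the fixed-point shape.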